<ul style="font-family:Times New Roman;font-size:20px;color: blueviolet">
<h1 align="center">Classification Model Building for UCI Bank Note Authentication Dataset</h1>
</ul>
---
<ul style="font-family:Times New Roman;font-size:15px;color: blueviolet">
<h1>Import libraries:</h1>
</ul>
```
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
from sklearn.ensemble import AdaBoostClassifier
import warnings
warnings.filterwarnings('ignore')
```
<ul style="font-family:Times New Roman;font-size:15px;color: blueviolet">
<h1>Load the Dataset:</h1>
</ul>
```
df = pd.read_csv('./Data/BankNote_Authentication.csv')
X = df.drop('class', axis=1)
y = df['class']
```
<ul style="font-family:Times New Roman;font-size:15px;color: blueviolet">
<h1>Split the Dataset for Training & Testing:</h1>
</ul>
```
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
```
<ul style="font-family:Times New Roman;font-size:15px;color: blueviolet">
<h1>Create Model Object for each Classification Algorithm:</h1>
</ul>
```
lr_clf = LogisticRegression(random_state=42)
svc = SVC(random_state=42)
dt_clf = DecisionTreeClassifier(random_state=42)
rf_clf = RandomForestClassifier(random_state=42)
gb_clf = GradientBoostingClassifier(random_state=42)
xgb_clf = XGBClassifier(random_state=42, eval_metric='mlogloss')
ada_clf = AdaBoostClassifier(random_state=42)
models = [lr_clf, svc, dt_clf, rf_clf, gb_clf, xgb_clf, ada_clf]
```
<ul style="font-family:Times New Roman;font-size:15px;color: blueviolet">
<h1>Train all the Models:</h1>
</ul>
```
warnings.simplefilter(action='ignore', category=FutureWarning)
model_name_list, accuracy_score_list, f1_score_list, recall_score_list, precision_score_list = [], [], [], [], []
for model in models:
    model_name_list.append(str(model).split('(')[0])
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    accuracy_score_list.append(accuracy_score(y_test, y_pred))
    f1_score_list.append(f1_score(y_test, y_pred))
    recall_score_list.append(recall_score(y_test, y_pred))
    precision_score_list.append(precision_score(y_test, y_pred))
```
<ul style="font-family:Times New Roman;font-size:15px;color: blueviolet">
<h1>Evaluate the Performance:</h1>
</ul>
```
model_evaluation_df = pd.DataFrame({'Model':model_name_list, 'Accuracy_Score':accuracy_score_list,
'f1_score':f1_score_list, 'Recall_Score':recall_score_list,
'Precision_Score': precision_score_list})
model_evaluation_df
```
<ul style="font-family:Times New Roman;font-size:15px;color: blueviolet">
<h1>Final Model:</h1>
</ul>
```
final_model = RandomForestClassifier(random_state=42)
final_model.fit(X_train, y_train)
y_pred = final_model.predict(X_test)
print(classification_report(y_test, y_pred))
```
<ul style="font-family:Times New Roman;font-size:15px;color: blueviolet">
<h1>Save the Model:</h1>
</ul>
```
import pickle
with open('classifier.pkl', 'wb') as pickle_out:
    pickle.dump(final_model, pickle_out)
```
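To reuse the saved model later, it can be loaded back with `pickle.load`. A minimal sketch (assuming the same `classifier.pkl` file and the test split from above):
```
import pickle

# load the persisted RandomForest model back from disk
with open('classifier.pkl', 'rb') as pickle_in:
    loaded_model = pickle.load(pickle_in)

# sanity check: the reloaded model should reproduce the original predictions
print(loaded_model.predict(X_test.head()))
```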
# Tutorial about transforming LocData
Locan provides methods for transforming localization data sets into new LocData objects.
```
from pathlib import Path
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import locan as lc
lc.show_versions(system=False, dependencies=False, verbose=False)
```
## Spatially randomize a structured set of localizations
Assume that localizations are somehow structured throughout a region. Often it is helpful to compare analysis results to a similar dataset in which localizations are homogeneously Poisson distributed. A LocData object with this kind of data can be provided by the randomize function.
```
rng = np.random.default_rng(seed=1)
locdata = lc.simulate_Thomas(parent_intensity=1e-5, region=((0, 1000), (0, 1000)), cluster_mu=100, cluster_std=10, seed=rng)
locdata.print_summary()
locdata_random = lc.randomize(locdata, hull_region='bb', seed=rng)
fig, ax = plt.subplots(nrows=1, ncols=2)
locdata.data.plot.scatter(x='position_x', y='position_y', ax=ax[0], color='Blue', label='locdata')
locdata_random.data.plot.scatter(x='position_x', y='position_y', ax=ax[1], color='Blue', label='locdata')
plt.tight_layout()
plt.show()
print('Area of bounding box for structured data: {:.0f}'.format(locdata.properties['region_measure_bb']))
print('Area of bounding box for randomized data: {:.0f}'.format(locdata_random.properties['region_measure_bb']))
print('Ratio: {:.4f}'.format(locdata_random.properties['region_measure_bb'] / locdata.properties['region_measure_bb']))
```
Regions other than the bounding box can be specified as a RoiRegion instance.
```
region = lc.ConvexHull(locdata.coordinates).region
locdata_random = lc.randomize(locdata, hull_region=region)
fig, ax = plt.subplots(nrows=1, ncols=2)
locdata.data.plot.scatter(x='position_x', y='position_y', ax=ax[0], color='Blue', label='locdata')
locdata_random.data.plot.scatter(x='position_x', y='position_y', ax=ax[1], color='Blue', label='locdata')
plt.tight_layout()
plt.show()
print('Area of bounding box for structured data: {:.0f}'.format(locdata.properties['region_measure_bb']))
print('Area of bounding box for randomized data: {:.0f}'.format(locdata_random.properties['region_measure_bb']))
print('Ratio: {:.4f}'.format(locdata_random.properties['region_measure_bb'] / locdata.properties['region_measure_bb']))
```
## Apply an affine transformation to localization coordinates
A wrapper function provides affine transformations based on either numpy or open3d methods.
```
matrix = ((-1, 0), (0, -1))
offset = (10, 10)
pre_translation = (100, 100)
locdata_transformed = lc.transform_affine(locdata, matrix, offset, pre_translation, method='numpy')
fig, ax = plt.subplots()
locdata.data.plot.scatter(x='position_x', y='position_y',color='Blue', label='locdata', ax=ax)
locdata_transformed.data.plot.scatter(x='position_x', y='position_y', color='Red', label='locdata', ax=ax);
```
## Apply a BunwarpJ transformation to localization coordinates
Often a transformation matrix was computed using ImageJ. The `bunwarp` function allows applying a transformation from the raw matrix of the ImageJ/Fiji plugin BunwarpJ. Here we show a very small region with a single fluorescent bead that is recorded on a red and a green dSTORM channel.
```
matrix_path = lc.ROOT_DIR / 'tests/test_data/transform/BunwarpJ_transformation_raw_green.txt'
locdata_green = lc.load_asdf_file(path=lc.ROOT_DIR /
'tests/test_data/transform/rapidSTORM_beads_green.asdf')
locdata_red = lc.load_asdf_file(path=lc.ROOT_DIR /
'tests/test_data/transform/rapidSTORM_beads_red.asdf')
locdata_green_transformed = lc.bunwarp(locdata=locdata_green, matrix_path=matrix_path, pixel_size=(10, 10), flip=True)
fig, ax = plt.subplots()
locdata_red.data.plot.scatter(x='position_x', y='position_y',color='Red', label='locdata_red', alpha=0.5, ax=ax)
locdata_green_transformed.data.plot.scatter(x='position_x', y='position_y', color='Green', label='locdata_green_transformed', alpha=0.5, ax=ax)
locdata_green.data.plot.scatter(x='position_x', y='position_y',color='Blue', label='locdata_green', alpha=0.5, ax=ax);
fig, ax = plt.subplots(figsize=(10, 8))
lc.render_2d_rgb_mpl([locdata_red, locdata_green_transformed, locdata_green], bin_size=5, bin_range=((200, 1000), (600, 1400)), rescale='equal', ax=ax);
```
```
%pip install pandas
```
# Turtle projects
The top block below is the main project.
- the `colorgram` package can be used for colour extraction (see the sketch below)
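A minimal sketch of how `colorgram` is commonly used to pull a palette from an image (the image file name here is just a placeholder):
```
import colorgram

# extract up to 10 dominant colours from a source image (placeholder file name)
colors = colorgram.extract('hirst_painting.jpg', 10)

# convert the Color objects into plain (r, g, b) tuples for use with turtle
color_list = [(c.rgb.r, c.rgb.g, c.rgb.b) for c in colors]
print(color_list)
```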
```
from turtle import Turtle, Screen
import turtle as t
import random
baby_turtle = Turtle()
baby_turtle.shape("turtle")
baby_turtle.color("purple")
baby_turtle.speed(50)
# baby_turtle.pensize(5)
baby_turtle.penup()
t.colormode(255)
baby_turtle.setheading(225)
baby_turtle.forward(150)
baby_turtle.setheading(0)
# color_list = ["red", "orange", "yellow", "green", "blue", "purple", "pink",'black']
def random_color():
    r = random.randint(0,255)
    g = random.randint(0,255)
    b = random.randint(0,255)
    colored = (r,g,b)
    return colored
for dots in range(1,101):
    baby_turtle.dot(20,random_color())
    baby_turtle.forward(50)
    if dots % 10 == 0:
        baby_turtle.setheading(90)
        baby_turtle.forward(50)
        baby_turtle.setheading(180)
        baby_turtle.forward(500)
        baby_turtle.setheading(0)
#screen for drawing a square
small_screen = Screen()
small_screen.bgcolor("white")
small_screen.title("A Turtle's Journey")
small_screen.exitonclick()
from turtle import Turtle, Screen
# Turtle is a class that is used to draw shapes
# turtle module
baby_turtle = Turtle()
baby_turtle.shape("turtle")
baby_turtle.color("purple")
# Create a screen and click on it to exit
small_screen = Screen()
small_screen.bgcolor("lightgreen")
small_screen.title("A Turtle's Journey")
small_screen.exitonclick()
# Drawing a square with a turtle
from turtle import Turtle, Screen
baby_turtle = Turtle()
baby_turtle.shape("turtle")
baby_turtle.color("purple")
baby_turtle.forward(100)
baby_turtle.left(90)
baby_turtle.forward(100)
baby_turtle.left(90)
baby_turtle.forward(100)
baby_turtle.left(90)
baby_turtle.forward(100)
#screen for drawing a square
small_screen = Screen()
small_screen.bgcolor("lightgreen")
small_screen.title("A Turtle's Journey")
small_screen.exitonclick()
```
- Choose the import style deliberately (see the sketch below):
1. `from turtle import *` imports everything, but bare calls like `forward(10)` give no hint of where they come from, so readability drops.
2. `from turtle import Turtle, Screen` is usually the clearest option, since other readers can see exactly what is used.
3. `import turtle` works well for one-off use, at the cost of prefixing every call.
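A short side-by-side illustration of the three styles (a small sketch, not taken from the project code):
```
# 1. wildcard import: everything lands in the namespace, but a bare forward(10)
#    gives no hint of where it comes from
from turtle import *

# 2. import only the names you need: the clearest option for other readers
from turtle import Turtle, Screen
tim = Turtle()

# 3. module import: fine for one-off use, at the cost of prefixing every call
import turtle
turtle.colormode(255)
```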
## Highway
```
from turtle import Turtle, Screen
baby_turtle = Turtle()
baby_turtle.shape("turtle")
baby_turtle.color("purple")
# dash lane😅
for i in range(50):
    baby_turtle.forward(10)
    baby_turtle.penup()
    baby_turtle.forward(10)
    baby_turtle.pendown()
#screen for drawing a square
small_screen = Screen()
small_screen.bgcolor("lightgreen")
small_screen.title("A Turtle's Journey")
small_screen.exitonclick()
```
## Shapes
```
from turtle import Turtle, Screen
baby_turtle = Turtle()
baby_turtle.shape("turtle")
baby_turtle.color("purple")
color_list = ["red", "orange", "yellow", "green", "blue", "purple", "pink",'black']
# shapes 😅
for i in range(3,10):
    baby_turtle.color(color_list[i-3])
    for j in range(i):
        ang = 360/i
        baby_turtle.forward(100)
        baby_turtle.left(ang)
#screen for drawing a square
small_screen = Screen()
small_screen.bgcolor("white")
small_screen.title("A Turtle's Journey")
small_screen.exitonclick()
from turtle import Turtle, Screen
import random
baby_turtle = Turtle()
baby_turtle.shape("turtle")
baby_turtle.color("purple")
color_list = ["red", "orange", "yellow", "green", "blue", "purple", "pink",'black']
# shapes 😅
for i in range(3,10):
    baby_turtle.color(random.choice(color_list))
    for j in range(i):
        ang = 360/i
        baby_turtle.forward(100)
        baby_turtle.left(ang)
#screen for drawing a square
small_screen = Screen()
small_screen.bgcolor("white")
small_screen.title("A Turtle's Journey")
small_screen.exitonclick()
```
## Random walker
```
from turtle import Turtle, Screen
import random
baby_turtle = Turtle()
baby_turtle.shape("turtle")
baby_turtle.color("purple")
baby_turtle.speed(2)
baby_turtle.pensize(5)
color_list = ["red", "orange", "yellow", "green", "blue", "purple", "pink",'black']
direction_list = [0,90,180,270]
# random walker 😅
for i in range(100):
    baby_turtle.color(random.choice(color_list))
    baby_turtle.forward(20)
    baby_turtle.setheading(random.choice(direction_list))
#screen for drawing a square
small_screen = Screen()
small_screen.bgcolor("white")
small_screen.title("A Turtle's Journey")
small_screen.exitonclick()
from turtle import Turtle, Screen
import turtle as t
import random
baby_turtle = Turtle()
baby_turtle.shape("turtle")
baby_turtle.color("purple")
baby_turtle.speed(2)
baby_turtle.pensize(5)
t.colormode(255)
# color_list = ["red", "orange", "yellow", "green", "blue", "purple", "pink",'black']
def random_color():
    r = random.randint(0,255)
    g = random.randint(0,255)
    b = random.randint(0,255)
    colored = (r,g,b)
    return colored
direction_list = [0,90,180,270]
# random walker 😅
for i in range(100):
    baby_turtle.color(random_color())
    baby_turtle.forward(20)
    baby_turtle.setheading(random.choice(direction_list))
#screen for drawing a square
small_screen = Screen()
small_screen.bgcolor("white")
small_screen.title("A Turtle's Journey")
small_screen.exitonclick()
from turtle import Turtle, Screen
import turtle as t
import random
baby_turtle = Turtle()
baby_turtle.shape("turtle")
baby_turtle.color("purple")
baby_turtle.speed(50)
# baby_turtle.pensize(5)
t.colormode(255)
# color_list = ["red", "orange", "yellow", "green", "blue", "purple", "pink",'black']
def random_color():
    r = random.randint(0,255)
    g = random.randint(0,255)
    b = random.randint(0,255)
    colored = (r,g,b)
    return colored
direction_list = [0,90,180,270]
# random walker 😅
for i in range(150):
    baby_turtle.color(random_color())
    baby_turtle.circle(100)
    baby_turtle.setheading(baby_turtle.heading() + 5)  # rotate the current heading by 5 degrees each pass
#screen for drawing a square
small_screen = Screen()
small_screen.bgcolor("white")
small_screen.title("A Turtle's Journey")
small_screen.exitonclick()
```
```
import os
import subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns # I love this package!
sns.set_style('white')
import torch
```
### Loss Trend Check
```
# load check point
model_path = 'checkpoint.pth.tar'
checkpoint = torch.load(model_path)
loss_history_train = checkpoint['loss_history_train']
loss_history_val = checkpoint['loss_history_val']
loss_train = [np.mean(l) for l in loss_history_train]
loss_val = [np.mean(l) for l in loss_history_val]
plt.plot(loss_train, label = 'Train Loss')
plt.plot(loss_val, label = 'Val Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Loss Trend')
plt.legend()
plt.show()
```
### Model performance
```
model_path = 'model_best.pth.tar'
# calculate outputs for the test data with our best model
output_csv_path = 'pred.csv'
command = ('python pred.py '
'--img_dir /home/dhwon/data_hdd2/UCLA-protest/img/test/ '
'--output_csvpath {csv_path} '
'--model {model} --batch_size 4 --cuda'
.format(csv_path = output_csv_path, model = model_path))
!{command}
# load prediction
df_pred = pd.read_csv(output_csv_path)
df_pred['imgpath'] = df_pred['imgpath'].apply(os.path.basename)
# load target
test_label_path = '/home/dhwon/data_hdd2/UCLA-protest/annot_test.txt'
df_target = pd.read_csv(test_label_path, delimiter= '\t')
```
#### Binary Variables
```
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve
def plot_roc(attr, target, pred):
"""Plot a ROC curve and show the accuracy score and the AUC"""
fig, ax = plt.subplots()
auc = roc_auc_score(target, pred)
acc = accuracy_score(target, (pred >= 0.5).astype(int))
fpr, tpr, _ = roc_curve(target, pred)
plt.plot(fpr, tpr, lw = 2, label = attr.title())
plt.legend(loc = 4, fontsize = 15)
plt.title(('ROC Curve for {attr} (Accuracy = {acc:.3f}, AUC = {auc:.3f})'
.format(attr = attr.title(), acc= acc, auc = auc)),
fontsize = 15)
plt.xlabel('False Positive Rate', fontsize = 15)
plt.ylabel('True Positive Rate', fontsize = 15)
plt.show()
return fig
# plot ROC curve for protest
attr = "protest"
target = df_target[attr]
pred = df_pred[attr]
fig = plot_roc(attr, target, pred)
fig.savefig(os.path.join('files', attr+'.png'))
# plot ROC curves for visual attributes
for attr in df_pred.columns[3:]:
    target = df_target[attr]
    pred = df_pred[attr][target != '-']
    target = target[target != '-'].astype(int)
    fig = plot_roc(attr, target, pred)
    fig.savefig(os.path.join('files', attr+'.png'))
```
#### Violence
```
import scipy.stats as stats
attr = 'violence'
pred = df_pred[df_target['protest'] == 1][attr].tolist()
target = df_target[df_target['protest'] == 1][attr].astype(float).tolist()
fig, ax = plt.subplots()
plt.scatter(target, pred, label = attr.title())
plt.xlim([-.05,1.05])
plt.ylim([-.05,1.05])
plt.xlabel('Annotation', fontsize = 15)
plt.ylabel('Prediction', fontsize = 15)
corr, pval = stats.pearsonr(target, pred)
plt.title(('Scatter Plot for {attr} (Correlation = {corr:.3f})'
.format(attr = attr.title(), corr= corr)), fontsize = 15)
plt.show()
fig.savefig(os.path.join('files', attr+'.png'))
```
## D(St)reams of Anomalies
The real world does not slow down for bad data
1. Set up a data science project structure in a new git repository in your GitHub account
2. Download the benchmark data set from
https://www.kaggle.com/boltzmannbrain/nab or
https://github.com/numenta/NAB/tree/master/data
3. Load one of the data sets into a pandas data frame
4. Formulate one or two ideas on how feature engineering would help the data set to establish additional value using exploratory data analysis
5. Build one or more anomaly detection models to determine the anomalies using the other columns as features
6. Document your process and results
7. Commit your notebook, source code, visualizations and other supporting files to the git repository in GitHub
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import StandardScaler
```
**The dataset used here is a time series of ambient temperature readings in an office. The goal is to find anomalies in the recorded temperature values. Temperature varies from place to place and season to season; in this dataset the location is fixed and the values run from July 2013 to May 2014.**
```
ambTempDf = pd.read_csv('../data/ambient_temperature_system_failure.txt')
ambTempDf.head(5)
ambTempDf.columns
```
**Since the timestamp column is of object type, it has to be converted to a datetime object for further processing.**
```
ambTempDf['timestamp'] = pd.to_datetime(ambTempDf['timestamp'])
ambTempDf.plot(x='timestamp',y='value')
plt.show()
```
**The above graph shows the distribution of the temperature given the timestamp.**
**Let's apply some statistical methods to detect probable outliers without considering the timestamps.**
## Standard Deviation Method
**In this method, the mean and standard deviation are calculated, and the data is filtered using upper and lower threshold boundaries.**
```
mean = ambTempDf.value.mean()
stdDev = ambTempDf.value.std()
print('minimum value : {}'.format(ambTempDf['value'].min()))
print('maximum value : {}'.format(ambTempDf['value'].max()))
print('mean : {}'.format(mean))
print('standard deviation : {}'.format(stdDev))
cut_off = stdDev * 3
lower, upper = mean - cut_off, mean + cut_off
print('lower boundary : {}, upper boundary : {}'.format(lower,upper))
```
**The cutoff is taken as three times the standard deviation; the lower and upper boundaries are obtained by subtracting the cutoff from and adding it to the mean, respectively. Now the anomalies can be filtered out.**
```
anomaly = ambTempDf.loc[(ambTempDf['value']<lower)|(ambTempDf['value']>upper), ['timestamp','value']]
plt.plot(ambTempDf.index,ambTempDf['value'],color='blue',label='normal')
plt.scatter(anomaly.index,anomaly['value'].values,color='red',label='anomaly')
plt.xlabel('index')
plt.ylabel('temperature values')
plt.legend()
plt.show()
```
**The above graph shows the anomalous values as red dots. According to this method, values near the minimum and maximum are considered outliers, which may or may not be true in all cases.**
**Another statistical method is the interquartile range. Here the $25^{th}$ and $75^{th}$ percentiles are calculated, and their difference is used to compute the upper and lower boundaries.**
## Interquartile range
```
plt.hist(ambTempDf['value'],bins=50)
plt.title('Histogram of the temperature values')
plt.show()
quartile25,quartile75 = np.percentile(ambTempDf.value,25), np.percentile(ambTempDf.value,75)
print('quartile25: {} ,quartile75: {}'.format(quartile25,quartile75))
interRange = quartile75-quartile25
print('inter quartile range: {}'.format(interRange))
cutOff = 1.5*interRange
lower,upper = quartile25-cutOff , quartile75+cutOff
print('lower: {}, upper: {}'.format(lower,upper))
anomaly1 = ambTempDf.loc[(ambTempDf['value']<lower)|(ambTempDf['value']>upper), ['timestamp','value']]
plt.hist([ambTempDf['value'],anomaly1['value']],color=['blue', 'red'],bins=50,stacked=True,label=['normal','anomaly'])
plt.legend()
plt.show()
ax = sns.boxplot(x=ambTempDf['value'])
plt.plot(ambTempDf.index,ambTempDf['value'],color='blue',label='normal')
plt.scatter(anomaly1.index,anomaly1['value'].values,color='red',label='anomaly')
plt.xlabel('index')
plt.ylabel('temperature values')
plt.legend()
plt.show()
```
**The plots show results similar to those obtained from the standard deviation method. Since we have not taken the timestamp into account (for example day versus night, or weekdays versus weekends), we cannot be sure that the anomalies detected from the values alone are accurate.**
**Now we will use an isolation forest and take the timestamp into account as well to detect anomalies in the data.**
## Isolation Forest
**From the timestamp we can extract features such as season, working hours, weekday and weekend, which might be helpful for anomaly prediction. For example, on weekends the office may be closed and the temperature system may operate differently.**
```
ambTempDf['fall'] = (ambTempDf['timestamp'].dt.month).isin([9,10,11]).astype(int)
ambTempDf['spring'] = (ambTempDf['timestamp'].dt.month).isin([3,4,5]).astype(int)
ambTempDf['summer'] = (ambTempDf['timestamp'].dt.month).isin([6,7,8]).astype(int)
ambTempDf['winter'] = (ambTempDf['timestamp'].dt.month).isin([12,1,2]).astype(int)
ambTempDf['hours'] = ambTempDf['timestamp'].dt.hour
ambTempDf['dayLight'] = ((ambTempDf['hours'] >= 7) & (ambTempDf['hours'] <= 22)).astype(int)
ambTempDf['dayOfTheWeek'] = ambTempDf['timestamp'].dt.dayofweek
ambTempDf['weekDay'] = (ambTempDf['dayOfTheWeek'] < 5).astype(int)
ambTempDf.head(5)
```
**The above table shows the features extracted for the prediction.**
**Let's train the isolation forest model with the extracted features.**
```
outlierFrac = 0.01
data = ambTempDf[['value','hours','dayLight','dayOfTheWeek','weekDay','fall','spring','summer','winter']]
scaler=StandardScaler()
npScaled = scaler.fit_transform(data)
dataFrame = pd.DataFrame(npScaled)
model = IsolationForest(contamination=outlierFrac)
model.fit(dataFrame)
ambTempDf['anomaly2'] = pd.Series(model.predict(dataFrame))
a = ambTempDf.loc[ambTempDf['anomaly2'] == -1, ['timestamp', 'value']]
plt.plot(ambTempDf.index, ambTempDf['value'], color='blue', label = 'Normal')
plt.scatter(a.index,a['value'],color='red', label = 'Anomaly')
plt.legend()
plt.show()
```
**Unlike the statistical methods above, this model flags anomalies that are not simply extreme values. We can break the results down by feature to find the cause of each anomaly.**
```
anomalies = ambTempDf[ambTempDf['anomaly2'] ==-1]
print('count: {}'.format(anomalies.shape[0]))
anomalies[['fall','spring','summer','winter']].sum()
normalWinter = ambTempDf[(ambTempDf['winter']==1) & (ambTempDf['anomaly2']==1) &(ambTempDf['weekDay']==0)] \
.groupby('hours')['value'].agg(['count','mean']).reset_index()
anomalyWinter = anomalies[(anomalies['winter']==1) & (anomalies['anomaly2']==-1) &(anomalies['weekDay']==0)].\
groupby('hours')['value'].agg(['count','mean']).reset_index()
plt.scatter(normalWinter['hours'], normalWinter['mean'],label='normal')
plt.scatter(anomalyWinter.hours, anomalyWinter['mean'],label='anomaly')
plt.title('Hours vs Temperature(Winter Weekend)')
plt.legend()
plt.show()
```
**As shown in the figure above, the algorithm treats temperatures significantly above the hourly average on winter weekends as anomalies. Here the isolation forest combines more than one feature to isolate such points.**
```
normalSummerWeekEnd = ambTempDf[(ambTempDf['summer']==1) & (ambTempDf['anomaly2']==1) &(ambTempDf['weekDay']==0)] \
.groupby('hours')['value'].agg(['count','mean']).reset_index()
anomalySummerWeekEnd = anomalies[(anomalies['summer']==1) & (anomalies['anomaly2']==-1) &(anomalies['weekDay']==0)].\
groupby('hours')['value'].agg(['count','mean']).reset_index()
plt.scatter(normalSummerWeekEnd['hours'], normalSummerWeekEnd['mean'],label='normal_weekend')
plt.scatter(anomalySummerWeekEnd.hours, anomalySummerWeekEnd['mean'],label='anomaly_weekend')
normalSummerWeekDay = ambTempDf[(ambTempDf['summer']==1) & (ambTempDf['anomaly2']==1) &(ambTempDf['weekDay']==1)] \
.groupby('hours')['value'].agg(['count','mean']).reset_index()
anomalySummerWeekDay = anomalies[(anomalies['summer']==1) & (anomalies['anomaly2']==-1) &(anomalies['weekDay']==1)].\
groupby('hours')['value'].agg(['count','mean']).reset_index()
plt.scatter(normalSummerWeekDay['hours'], normalSummerWeekDay['mean'],label='normal_weekday')
plt.scatter(anomalySummerWeekDay.hours, anomalySummerWeekDay['mean'],label='anomaly_weekday')
plt.title('Hours vs Temperature(Summer)')
plt.legend()
plt.show()
```
**It can be observed from the figure above that the ambient temperature increases during office hours on weekdays and starts to decrease afterwards, whereas on weekends it decreases as usual. The isolation forest has successfully captured this phenomenon and did not categorize the pattern change as an anomaly; only the outliers from these patterns are captured as anomalies. Since this method is unsupervised, there may be some misclassifications. For example, the mean temperature at hour 23 for winter weekends is categorized as an anomaly although it appears normal.**
```
import numpy as np
import nltk
from collections import Counter
import pandas as pd
import seaborn as sns
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import matplotlib.pyplot as plt
sns.set_context("paper", font_scale=1.2)
%matplotlib notebook
%load_ext autoreload
%autoreload 2
import os
import re
import sys
sys.path.append(os.path.join(os.getcwd(), "..", "..", "src"))
import util.io as mio
from util import statsUtil
import util.plotting as mplot
from model.conversationDataframe import ConversationDataframe
from stats.iConvStats import IConvStats
from stats.wordsCountStats import WordsCountStats
```
# Intro
This notebook is used as a utility for analysing text.
The goal is to get some insight into the structure, content and quality of the text.
Examples: analysis of a CV, personal articles, job ads.
# Load Text
Load text you want to analyse
```
def load_text(filepaths):
"""
Load text you want to analyse.
:param filepaths: list of paths to text files to load
:return: single string representing all retrieved text
"""
text = ""
for path in filepaths:
with open(path, 'r', encoding='UTF-8') as f:
text += "\n"+f.read()
return text
text = load_text([""])
```
# Basic Stats
Length, token counts and lexical richness, n-gram distributions and the most relevant features.
```
words = statsUtil.getWords(text)
types = set(words)
print("Total length: {:.0f}".format(len(text)))
print("Tokens count: {:.0f}".format(len(words)))
print("Distinct tokens count: {:.0f}".format(len(set(words))))
print("Lexical richness: {0:.5f}".format(len(types)/len(words)))
def plot_most_common(most_common_ngrams, n_most, join=False):
    most_common_ngrams, count = zip(*most_common_ngrams.most_common(n_most))
    if join:
        most_common_ngrams = [" ".join(list(e)) for e in most_common_ngrams]
    ax = sns.pointplot(y=most_common_ngrams, x=count)
    plt.show()
# Most common words
words_count = Counter(words)
# Plot most common words
plot_most_common(words_count, n_most=30)
most_common_bigrams = Counter(nltk.bigrams(words))
plot_most_common(most_common_bigrams, 20, join=True)
most_common_trigrams = Counter(nltk.trigrams(words))
plot_most_common(most_common_trigrams, 20, join=True)
# Get most relevant words using TF-IDF
# For this statistic we need additional pieces of text to compare with our speech transcript
# we can simply load some corpora from NLTK
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
def get_top_features(text, n):
    # Load corpora for different genres
    c1 = nltk.corpus.gutenberg.raw('carroll-alice.txt')
    c2 = nltk.corpus.inaugural.raw("2009-Obama.txt")
    c3 = nltk.corpus.webtext.raw("firefox.txt")
    # Load english stopwords
    stops = set(stopwords.words("english"))
    # Compute TF-IDF matrix and print top results for our speech
    vectorizer = TfidfVectorizer(analyzer='word', stop_words=stops, ngram_range=(2,3))
    tfIdf = vectorizer.fit_transform([text, c1, c2, c3]).toarray()
    indices = np.argsort(tfIdf[0])[::-1]
    features = vectorizer.get_feature_names()
    top_features = [features[i] for i in indices[:n] if tfIdf[0][i] != 0]
    return top_features
get_top_features(text, 20)
```
# Prose Stats
“Over the whole document, make the average sentence length 15-20 words, 25-33 syllables and 75-100 characters.”
```
# prose stats
sentences = list(filter(lambda x : len(x)>0, map(str.strip, re.split(r'[\.\?!\n]', text))))
sen_len = [len(sent) for sent in sentences]
print("Average sentence len {}. Max {}, min {}".format(np.mean(sen_len), max(sen_len), min(sen_len)))
for sent in sentences:
    if len(sent) > 300:
        print("* " + sent)
```
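The guideline above also mentions words per sentence; a small follow-up sketch (reusing the `sentences` list and `np` from the cells above) to check that as well:
```
# average number of words per sentence, to compare against the 15-20 word guideline
words_per_sentence = [len(sent.split()) for sent in sentences]
print("Average words per sentence: {:.1f}".format(np.mean(words_per_sentence)))
```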
# Arduino LCD Example using AdaFruit 1.8" LCD Shield
This notebook shows a demo on Adafruit 1.8" LCD shield.
```
from pynq import Overlay
Overlay("base.bit").download()
```
## 1. Instantiate AdaFruit LCD controller
In this example, make sure that 1.8" LCD shield from Adafruit is placed on the Arduino interface.
After instantiation, users should expect to see a PYNQ logo with pink background shown on the screen.
```
from pynq.iop import Arduino_LCD18
from pynq.iop import ARDUINO
lcd = Arduino_LCD18(ARDUINO)
```
## 2. Clear the LCD screen
Clear the LCD screen so users can display other pictures.
```
lcd.clear()
```
## 3. Display a picture
The screen is 160 pixels by 128 pixels. So the largest picture that can fit in the screen is 160 by 128. To resize a picture to a desired size, users can do:
```python
from PIL import Image
img = Image.open('data/large.jpg')
w_new = 160
h_new = 128
new_img = img.resize((w_new,h_new),Image.ANTIALIAS)
new_img.save('data/small.jpg','JPEG')
img.close()
```
The format of the picture can be PNG, JPEG, BMP, or any other format that can be opened using the `Image` library. In the API, the picture will be compressed into a binary format having (per pixel) 5 bits for blue, 6 bits for green, and 5 bits for red. All the pixels (of 16 bits each) will be stored in DDR memory and then transferred to the IO processor for display.
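As a rough illustration of that 5-6-5 packing (a hypothetical helper, not part of the PYNQ API; the ordering of the colour fields is assumed here for illustration), an 8-bit-per-channel pixel can be reduced to 16 bits like this:
```python
def pack_rgb565(r, g, b):
    """Reduce an 8-bit-per-channel pixel to 16 bits: 5 bits red, 6 bits green, 5 bits blue.
    The field order (red in the high bits) is an assumption, not taken from the driver."""
    return ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3)

print(hex(pack_rgb565(255, 105, 180)))  # a pink pixel packed into 16 bits
```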
The orientation of the picture is as shown below, while currently, only orientation 1 and 3 are supported. Orientation 3 will display picture normally, while orientation 1 will display picture upside-down.
<img src="data/adafruit_lcd18.jpg" width="400px"/>
To display the picture at the desired location, the position has to be calculated. For example, to display in the center a 76-by-25 picture with orientation 3, `x_pos` has to be (160-76)/2=42, and `y_pos` has to be (128/2)+(25/2)≈76.
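A small worked sketch of that centering calculation (the coordinate convention follows the example above and is an assumption, not taken from the API documentation):
```python
screen_w, screen_h = 160, 128   # LCD resolution
img_w, img_h = 76, 25           # picture to be centered

x_pos = (screen_w - img_w) // 2       # (160 - 76) / 2 = 42
y_pos = (screen_h + img_h) // 2       # (128 + 25) / 2 ~= 76
print(x_pos, y_pos)
```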
The parameter `background` is a list of 3 components: [R,G,B], where each component consists of 8 bits. If it is not defined, it will be defaulted to [0,0,0] (black).
```
lcd.display('data/board_small.jpg',x_pos=0,y_pos=127,
orientation=3,background=[255,255,255])
```
## 4. Animate the picture
We can provide the number of frames to the method `display()`; this will move the picture around with a desired background color.
```
lcd.display('data/logo_small.png',x_pos=0,y_pos=127,
orientation=1,background=[255,255,255],frames=100)
```
## 5. Draw a line
Draw a white line from upper left corner towards lower right corner.
The parameter `background` is a list of 3 components: [R,G,B], where each component consists of 8 bits. If it is not defined, it will be defaulted to [0,0,0] (black).
Similarly, the parameter `color` defines the color of the line, with a default value of [255,255,255] (white).
All three `draw_line()` calls below use the default orientation 3.
Note that if the background is changed, the screen will also be cleared. Otherwise the old lines will still stay on the screen.
```
lcd.clear()
lcd.draw_line(x_start_pos=151,y_start_pos=98,x_end_pos=19,y_end_pos=13)
```
Draw a 100-pixel wide red horizontal line, on a yellow background. Since the background is changed, the screen will be cleared automatically.
```
lcd.draw_line(50,50,150,50,color=[255,0,0],background=[255,255,0])
```
Draw a 100-pixel tall blue vertical line, on the same yellow background.
```
lcd.draw_line(50,20,50,120,[0,0,255],[255,255,0])
```
## 6. Print a scaled character
Users can print a scaled string at a desired position with a desired text color and background color.
The first `print_string()` prints "Hello, PYNQ!" at 1st row, 1st column, with white text color and blue background.
The second `print_string()` prints today's date at 5th row, 10th column, with yellow text color and blue background.
Note that if the background is changed, the screen will also be cleared. Otherwise the old strings will still stay on the screen.
```
text = 'Hello, PYNQ!'
lcd.print_string(1,1,text,[255,255,255],[0,0,255])
import time
text = time.strftime("%d/%m/%Y")
lcd.print_string(5,10,text,[255,255,0],[0,0,255])
```
## 7. Draw a filled rectangle
The next 3 cells will draw 3 rectangles of different colors, respectively. All of them use the default black background and orientation 3.
```
lcd.draw_filled_rectangle(x_start_pos=10,y_start_pos=10,
width=60,height=80,color=[64,255,0])
lcd.draw_filled_rectangle(x_start_pos=20,y_start_pos=30,
width=80,height=30,color=[255,128,0])
lcd.draw_filled_rectangle(x_start_pos=90,y_start_pos=40,
width=70,height=120,color=[64,0,255])
```
## 8. Read joystick button
```
button=lcd.read_joystick()
if button == 1:
    print('Left')
elif button == 2:
    print('Down')
elif button == 3:
    print('Center')
elif button == 4:
    print('Right')
elif button == 5:
    print('Up')
else:
    print('Not pressed')
```
## ```Imports```
---
```
# standard libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# modeling libraries
from classifiers_copy1 import classify
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Hide all Warnings
import warnings
warnings.filterwarnings('ignore')
```
## ```Initial Modeling```
---
```
fraud = pd.read_csv('../SmallBalancedClasses.csv')
fraud.head(3)
fraud.columns
# dummy for object variables
df_fraud = fraud.copy()
df_fraud = pd.get_dummies(fraud, columns=['gender','city','state','category','merchant'])
df_fraud.shape
# setting up X/y
X = df_fraud.drop(columns=['trans_date_trans_time','lat', 'long','job','merch_lat', 'merch_long','dob','is_fraud'])
y = df_fraud['is_fraud']
# train/test split
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=42,stratify=y)
# standard scale the data
ss = StandardScaler()
X_train_ss = ss.fit_transform(X_train)
X_test_ss = ss.transform(X_test)
# Running several models to select a few for hyperparameter tuning
result = classify(X_train_ss,X_test_ss,y_train,y_test)
result
# these are the best models on default settings
result[result['Train Acc']==1]
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, plot_confusion_matrix
# train model
rf = RandomForestClassifier(random_state=42)
rf.fit(X_train_ss,y_train)
# score model
print(f"train Acc: {rf.score(X_train_ss,y_train)}")
print(f"test Acc: {rf.score(X_test_ss,y_test)}")
# Confusion Matrix
plot_confusion_matrix(rf, X_test_ss, y_test, display_labels=['noFraud','Fraud'])
plt.show()
pd.DataFrame(rf.feature_importances_, index=X.columns).sort_values(by = 0, ascending=False)
# setup gridsearch params for Random Forest
rf_params = {
'n_estimators': [100,125,150,175,200],
'criterion': ["gini", "entropy"],
'min_samples_split': [2,3,4,5],
'max_depth' : [4,5,6,7,8]
}
#
rf = RandomForestClassifier(random_state=42)
gs = GridSearchCV(estimator=rf, param_grid=rf_params, cv=5)
gs.fit(X_train_ss,y_train)
# the best parameters that led to the score attribute
gs.best_params_
print(f"train Acc: {gs.score(X_train_ss,y_train)}")
print(f"test Acc: {gs.score(X_test_ss,y_test)}")
```
**Default Settings:**
- Train Acc: 1
- Test Acc: 0.97
**GridSearch Best**
- Train Acc: 0.87
- Test Acc: 0.87
```
# train model
et = ExtraTreeClassifier()
et.fit(X_train_ss,y_train)
# score model
print(f"train Acc: {et.score(X_train_ss,y_train)}")
print(f"test Acc: {et.score(X_test_ss,y_test)}")
# Confusion Matrix
plot_confusion_matrix(et, X_test_ss, y_test, display_labels=['noFraud','Fraud'])
plt.show()
# setup gridsearch params Extratrees classifier
et_params = {
'splitter': ["random", "best"],
'criterion': ["gini", "entropy"],
'min_samples_split': [2,3,4,5],
'max_depth' : [4,5,6,7,8]
}
#
et = ExtraTreeClassifier(random_state=42)
gs_et = GridSearchCV(estimator=et, param_grid=et_params, cv=5, scoring = 'accuracy')
gs_et.fit(X_train_ss,y_train)
gs_et.best_params_
print(f"train Acc: {gs_et.score(X_train_ss,y_train)}")
print(f"test Acc: {gs_et.score(X_test_ss,y_test)}")
```
**Default Settings:**
- Train Acc: 1
- Test Acc: 0.84
**GridSearch Best**
- Train Acc: 0.58
- Test Acc: 0.58
```
# train model
dt = DecisionTreeClassifier()
dt.fit(X_train_ss,y_train)
# score model
print(f"train Acc: {dt.score(X_train_ss,y_train)}")
print(f"test Acc: {dt.score(X_test_ss,y_test)}")
# Confusion Matrix
plot_confusion_matrix(dt, X_test_ss, y_test, display_labels=['noFraud','Fraud'])
plt.show()
# setup gridsearch params Decisiontree classifier
dt_params = {
'splitter': ["random", "best"],
'criterion': ["gini", "entropy"],
'min_samples_split': [2,3,4,5],
'max_depth' : [4,5,6,7,8]
}
#
dt = DecisionTreeClassifier(random_state=42)
gs_dt = GridSearchCV(estimator=dt, param_grid=dt_params, cv=5, scoring = 'accuracy')
gs_dt.fit(X_train_ss,y_train)
gs_dt.best_params_
print(f"train Acc: {gs_dt.score(X_train_ss,y_train)}")
print(f"test Acc: {gs_dt.score(X_test_ss,y_test)}")
```
**Default Settings:**
- Train Acc: 1
- Test Acc: 0.96
**GridSearch Best**
- Train Acc: 0.97
- Test Acc: 0.97
```
# Confusion Matrix
plot_confusion_matrix(rf, X_test_ss, y_test, display_labels=['noFraud','Fraud'])
plt.show()
```
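The recall figure quoted in the conclusion can be reproduced with a quick check like the sketch below (it assumes the default-settings `dt` model and the scaled test split defined above):
```
from sklearn.metrics import recall_score, classification_report

# recall on the fraud class for the default DecisionTree model
y_pred = dt.predict(X_test_ss)
print(f"test Recall: {recall_score(y_test, y_pred):.2f}")
print(classification_report(y_test, y_pred, target_names=['noFraud', 'Fraud']))
```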
## Conclusion
---
1. After training multiple models I identified 3 top-performing models (DecisionTreeClassifier, ExtraTreeClassifier, RandomForestClassifier). Each showed a perfect score on the training set, but after re-running the models with grid-search hyperparameter tuning, the best model was a DecisionTreeClassifier with default settings.
**- Train Acc: 1.0**
**- Test Acc: 0.96**
**- Test Recall: 0.97**
2. The DecisionTree classifier has a recall score of 0.97. We never want to classify a purchase as non-fraud when in reality it was a fraudulent purchase; we would rather misclassify it as fraud and double-check with the customer.
3. This will be the model used to create the online tool.
4. The next step is to train a DecisionTree classifier on the entire dataset with synthetic balanced classes.
# Visualization with Bitbrains Data
# Data Science Consulting Project with [Manifold.co](https://manifold.co)
### Modeling System Resource Usage for Predictive Scheduling
```
# Import packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import glob
from pandas import read_csv
from pandas.plotting import autocorrelation_plot
from dateutil.relativedelta import relativedelta
from scipy.optimize import minimize
import statsmodels.formula.api as smf
import statsmodels.tsa.api as smt
import statsmodels.api as sm
import scipy.stats as scs
from sklearn.linear_model import LassoCV, RidgeCV
from itertools import product
from tqdm import tqdm_notebook
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
```
## Read in data
```
path = r'rnd/2013-7/' # use your path
all_files = glob.glob(os.path.join(path, "*.csv")) # advisable to use os.path.join as this makes concatenation OS independent
df_from_each_file = (pd.read_csv(f, sep = ';\t').assign(VM=os.path.basename(f).split('.')[0]) for f in all_files)
concatenated_df = pd.concat(df_from_each_file)
path = r'rnd/2013-8/' # use your path
all_files = glob.glob(os.path.join(path, "*.csv")) # advisable to use os.path.join as this makes concatenation OS independent
df_from_each_file = (pd.read_csv(f, sep = ';\t').assign(VM=os.path.basename(f).split('.')[0]) for f in all_files)
concatenated_df8 = pd.concat(df_from_each_file)
path = r'rnd/2013-9/' # use your path
all_files = glob.glob(os.path.join(path, "*.csv")) # advisable to use os.path.join as this makes concatenation OS independent
df_from_each_file = (pd.read_csv(f, sep = ';\t').assign(VM=os.path.basename(f).split('.')[0]) for f in all_files)
concatenated_df9 = pd.concat(df_from_each_file)
```
## Create Data Frame
```
newdat = concatenated_df.append(concatenated_df8)
newerdat = newdat.append(concatenated_df9)
concatenated_df = newerdat
```
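As an aside, the three nearly identical load-and-append steps above could be collapsed into one loop (a sketch, assuming the same `rnd/2013-<month>/` directory layout):
```
# equivalent to the blocks above: read every monthly folder, then concatenate once
monthly_frames = []
for month in ['2013-7', '2013-8', '2013-9']:
    for f in glob.glob(os.path.join('rnd', month, '*.csv')):
        vm_name = os.path.basename(f).split('.')[0]
        monthly_frames.append(pd.read_csv(f, sep=';\t').assign(VM=vm_name))
concatenated_df = pd.concat(monthly_frames)
```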
Check it out
```
concatenated_df.head()
```
## Feature engineering and converting pandas into a timeseries
### Timestamp is in UNIX epochs
```
concatenated_df['Timestamp'] = pd.to_datetime(concatenated_df['Timestamp [ms]'], unit = 's')
concatenated_df = concatenated_df.apply(pd.to_numeric, errors='ignore')  # assign the result so the conversion takes effect
# Date Feature Engineering
concatenated_df['weekday'] = concatenated_df['Timestamp'].dt.dayofweek
concatenated_df['weekend'] = ((concatenated_df.weekday) // 5 == 1).astype(float)
concatenated_df['month']=concatenated_df.Timestamp.dt.month
concatenated_df['day']=concatenated_df.Timestamp.dt.day
concatenated_df.set_index('Timestamp',inplace=True)
# Other Feature Engineering
concatenated_df["CPU usage prev"] = concatenated_df['CPU usage [%]'].shift(1)
concatenated_df["CPU_diff"] = concatenated_df['CPU usage [%]'] - concatenated_df["CPU usage prev"]
concatenated_df["received_prev"] = concatenated_df['Network received throughput [KB/s]'].shift(1)
concatenated_df["received_diff"] = concatenated_df['Network received throughput [KB/s]']- concatenated_df["received_prev"]
concatenated_df["transmitted_prev"] = concatenated_df['Network transmitted throughput [KB/s]'].shift(1)
concatenated_df["transmitted_diff"] = concatenated_df['Network transmitted throughput [KB/s]']- concatenated_df["transmitted_prev"]
```
## Fill in missing values using forward propagating function from pandas
```
concatenated_df = concatenated_df.fillna(method='ffill')
```
## Create new data frame: resampled & aggregated over each hour for all VMs
```
hourlydat = concatenated_df.resample('H').sum()
```
## Examine autocorrelations of hourly CPU usage
```
## Hourly resampled totals
plt.figure(figsize=(15,5))
pd.plotting.autocorrelation_plot(hourlydat['CPU usage [MHZ]']);
```
## Is CPU Capacity Ever Met? If so, how often?
```
overprovision = pd.DataFrame(hourlydat['CPU usage [MHZ]'])
overprovision['CPU capacity provisioned'] = pd.DataFrame(hourlydat['CPU capacity provisioned [MHZ]'])
plt.style.use('seaborn-white')
overprovision.plot(figsize = (12,10),linewidth=2.5, fontsize=20)
plt.title('Is CPU Capacity Ever Met?',fontsize=22)
plt.ylabel((r'CPU [MHz] $e^{7}$'), fontsize=20);
plt.xlabel('Date', fontsize=20);
plt.tick_params(labelsize=15)
plt.xticks( fontsize = 15)
plt.legend(loc="best", fontsize =14)
plt.ticklabel_format(axis = 'y', style = 'sci', scilimits = (1,6))
plt.savefig('CPU_cap_under.png')
plt.show()
## percent CPU used across network
print("The Average CPU Percent Usage is only: " + str(round(concatenated_df['CPU usage [%]'].mean(),2)) + "%!!")
print("The Minimum CPU Percent Usage is: " + str(round(concatenated_df['CPU usage [%]'].min(),2)) + "%!!")
print("The Maximum CPU Percent Usage is: " + str(round(concatenated_df['CPU usage [%]'].max(),2)) + "%, possibly inidcating the system crashed?")
```
## What might cause over-provisioning? Spikes in network throughput?
### Graphs below are aggregated (summed)
```
cpu = concatenated_df[['CPU usage [MHZ]']]
receive = concatenated_df[['Network received throughput [KB/s]']]
transmit = concatenated_df[['Network transmitted throughput [KB/s]']]
provisioned = concatenated_df[['CPU capacity provisioned [MHZ]']]
hourlycpu = cpu.resample('H').sum()
hourlytransmit = transmit.resample('H').sum()
hourlyreceive = receive.resample('H').sum()
hourlyprovisioned = provisioned.resample('H').sum()
hourlytransmit.plot(color = "purple",linewidth = 4, figsize=(10, 5))
plt.title('Transmitted Throughput [KB/s] Totals \n Resampled & Aggregated Hourly',fontsize=15);
plt.ylabel('Transmitted Throughput [KB/s]', fontsize=15);
plt.xlabel('', fontsize=15);
hourlyreceive.plot( linewidth = 4, figsize=(10, 5))
plt.title('Received Throughput [KB/s] Totals \n Resampled & Aggregated Hourly',fontsize=15);
plt.ylabel('Received Throughput [KB/s]', fontsize=15);
plt.xlabel('', fontsize=15);
plt.yticks(fontsize=15);
plt.xticks(fontsize=15);
hourlyprovisioned.plot(color = "g", linewidth = 4, figsize=(10, 5))
plt.title('CPU Provisioned Totals \n Resampled & Aggregated Hourly',fontsize=15);
plt.ylabel('CPU Capacity Provisioned [MHz] $e^{7}$', fontsize=15);
plt.xlabel('', fontsize=15);
plt.yticks(fontsize=15);
plt.xticks(fontsize=15);
hourlycpu.plot(linewidth = 4, figsize=(10, 5))
plt.title('CPU Usage Totals \n Resampled & Aggregated Hourly',fontsize=15);
plt.ylabel('CPU usage [MHz] $e^{7}$', fontsize=15);
plt.xlabel('', fontsize=15);
plt.yticks(fontsize=15);
plt.xticks(fontsize=15);
```
### Graphs below are max values across network
```
hourlycpu = cpu.resample('H').max()
hourlytransmit = transmit.resample('H').max()
hourlyreceive = receive.resample('H').max()
hourlyprovisioned = provisioned.resample('H').max()
hourlytransmit.plot(color = "purple",linewidth = 4, figsize=(10, 5))
plt.title('Transmitted Throughput [KB/s] Max',fontsize=15);
plt.ylabel('Transmitted Throughput [KB/s]', fontsize=15);
plt.xlabel('', fontsize=15);
hourlyreceive.plot( linewidth = 4, figsize=(10, 5))
plt.title('Received Throughput [KB/s] Max',fontsize=15);
plt.ylabel('Received Throughput [KB/s]', fontsize=15);
plt.xlabel('', fontsize=15);
plt.yticks(fontsize=15);
plt.xticks(fontsize=15);
hourlyprovisioned.plot(color = "g", linewidth = 4, figsize=(10, 5))
plt.title('CPU Provisioned Max',fontsize=15);
plt.ylabel('CPU Capacity Provisioned [MHz] $e^{7}$', fontsize=15);
plt.xlabel('', fontsize=15);
plt.yticks(fontsize=15);
plt.xticks(fontsize=15);
hourlycpu.plot(linewidth = 4, figsize=(10, 5))
plt.title('CPU Usage Max',fontsize=15);
plt.ylabel('CPU usage [MHz] $e^{7}$', fontsize=15);
plt.xlabel('', fontsize=15);
plt.yticks(fontsize=15);
plt.xticks(fontsize=15);
```
## Visualize rolling-mean trends over time; note the large spike, which we keep in the model
```
df_rm = pd.concat([receive.rolling(12).mean(), transmit.rolling(12).mean()], axis=1)
df_rm.plot(figsize=(15,5), linewidth=2, fontsize=20)
plt.xlabel('Date', fontsize=20);
df_rm = pd.concat([cpu.rolling(24).sum()], axis=1)
df_rm.plot(figsize=(15,5), linewidth=2, fontsize=20)
plt.xlabel('Date', fontsize=20);
```
## Zoom in to look at hourly trends in cpu usage
```
hourlycpu[60:120].plot(style=[':', '--', '-'])
plt.ylabel('CPU Usage Avg [MHZ]');
```
## Plots of CPU Usage Across the Week - Highly Variable!
```
hourlydat = concatenated_df.resample('H').sum()
hourlydat['Date_Time'] = hourlydat.index
hourlydat['weekday'] = hourlydat['Date_Time'].dt.dayofweek
hourlydat['weekend'] = ((hourlydat.weekday) // 5 == 1).astype(float)
```
### Feature engineering with the date
```
hourlydat['month']=hourlydat['Date_Time'].dt.month
hourlydat['day']=hourlydat['Date_Time'].dt.day
hourlydat.drop('Date_Time', axis=1, inplace=True)
hourlydat.drop('Timestamp [ms]', axis=1, inplace=True)
plotdays = hourlydat.groupby('weekday').agg({'CPU usage [MHZ]': ['mean']})
plotdays = pd.DataFrame(plotdays)
plotdays.plot(linewidth = 4, figsize=(7, 7),legend=None)
plt.title('CPU Usage Totals \n Across Days',fontsize=20);
plt.ylabel('CPU usage [MHZ]', fontsize=15);
plt.xlabel('', fontsize=15);
plt.xticks(np.arange(7), ('Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun'), fontsize=15);
plt.yticks(fontsize=15);
plt.figure(figsize=(7,7))
plt.title('CPU Usage Totals \n Across Days',fontsize=20);
plt.ylabel('CPU usage [MHZ]', fontsize=15);
plt.yticks(fontsize=15);
plt.xlabel('', fontsize=15);
sns.boxplot(y=hourlydat['CPU usage [MHZ]'],x = hourlydat.weekday,
whis=np.inf, palette="vlag",linewidth=3)
plt.xticks(np.arange(7), ('Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun'), fontsize=15);
plt.figure(figsize=(7,7))
plt.title('CPU Usage Lower on Weekends',fontsize=20);
plt.ylabel('CPU usage [MHZ]', fontsize=15);
plt.yticks(fontsize=15);
sns.boxplot(y=hourlydat['CPU usage [MHZ]'],x = hourlydat.weekend,
whis=np.inf, palette="vlag",linewidth=3)
plt.xticks(np.arange(2), ('Weekday', 'Weekend'), fontsize=15);
plt.xlabel('', fontsize=15);
```
## Visualize Correlations in Data (hourlydat)
```
plt.figure(figsize=(10, 8))
sns.heatmap(hourlydat.corr())
```
<a href="https://colab.research.google.com/github/ndkrishna/demo-self-driving/blob/master/R6_ExternalLab_AIML.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
### A MNIST-like fashion product database
In this notebook, we classify the images into the respective classes given in the dataset. We use a Neural Net and a Deep Neural Net in Keras to solve this and check the accuracy scores.
### Load tensorflow
```
%tensorflow_version 1.x
import tensorflow as tf
tf.random.set_random_seed(42)
tf.__version__
```
### Collect Data
```
import keras
```
Fashion-MNIST is a dataset of Zalando's article images consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes. There are 7000 images per class.
```
(trainX, trainY), (testX, testY) = keras.datasets.fashion_mnist.load_data()
print(testY[0:5])
trainX.shape
trainY.shape
trainY
import matplotlib.pyplot as plt
#Trying to understand the data with one picture sample of every class
i = 0
counter = 0
plt.figure(figsize = (20,10))
while i < 10:
if trainY[counter]==i:
plt.subplot(1, 10, i+1)
gray = trainX[counter]
plt.imshow(gray, cmap = plt.get_cmap(name = 'gray'))
i = i + 1
plt.xlabel(trainY[counter])
counter = counter + 1
plt.show()
```
### Convert both training and testing labels into one-hot vectors.
**Hint:** check **tf.keras.utils.to_categorical()**
```
#One-hot encode the class vector
#convert class vectors (integers) to binary class matrix
#convert trainY and testY
#number of classes: 10
#we are doing this to use categorical_crossentropy as loss
trainY_Original = trainY.copy()
testY_Original = testY.copy()
trainY = tf.keras.utils.to_categorical(trainY, num_classes=10)
testY = tf.keras.utils.to_categorical(testY, num_classes=10)
print(trainY.shape)
print('First 5 examples now are: ', trainY[0:5])
```
### Visualize the data
Plot the first 10 images in the training set and their labels.
```
import matplotlib.pyplot as plt
#Trying to understand the data with one picture sample
i = 0
plt.figure(figsize = (20,10))
while i < 10:
    plt.subplot(1, 10, i+1)
    gray = trainX[i]
    plt.imshow(gray, cmap = plt.get_cmap(name = 'gray'))
    plt.xlabel(trainY_Original[i]) # label with the original integer class, not the one-hot vector
    i = i + 1
```
### Build a neural Network with a cross entropy loss function and sgd optimizer in Keras. The output layer with 10 neurons as we have 10 classes.
```
# Initialize Sequential model
model = tf.keras.models.Sequential()
# Reshape data from 2D to 1D -> 28x28 to 784
model.add(tf.keras.layers.Reshape((784,),input_shape=(28,28,)))
# Add Dense Layer which provides 10 Outputs after applying softmax
model.add(tf.keras.layers.Dense(10, activation='softmax'))
# Compile the model
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
```
### Execute the model using model.fit()
```
model.fit(trainX,trainY,
validation_data=(testX,testY),
epochs=5,
batch_size=32)
```
### In the above Neural Network model add Batch Normalization layer after the input layer and repeat the steps.
```
#Initialize Sequential model
model = tf.keras.models.Sequential()
#Reshape data from 2D to 1D -> 28x28 to 784
model.add(tf.keras.layers.Reshape((784,),input_shape=(28,28,)))
#Normalize the data
model.add(tf.keras.layers.BatchNormalization())
# Add Dense Layer which provides 10 Outputs after applying softmax
model.add(tf.keras.layers.Dense(10, activation='softmax'))
# Compile the model
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
```
### Execute the model
```
model.fit(trainX,trainY,
validation_data=(testX,testY),
epochs=5,
batch_size=32)
out = model.predict_classes(testX)
print(out)
print(testY_Original)
test_loss, test_acc = model.evaluate(testX, testY)
print('Test accuracy:', test_acc)
```
### Customize the learning rate to 0.001 in sgd optimizer and run the model
```
# Compile the model
#Create optimizer with non-default learning rate
sgd_optimizer = tf.keras.optimizers.SGD(lr=0.001)
model.compile(optimizer=sgd_optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(trainX,trainY,
validation_data=(testX,testY),
epochs=5,
batch_size=32)
```
### Build the Neural Network model with 3 Dense layers with 100, 100, 10 neurons respectively in each layer. Use cross entropy loss function and sigmoid as activation in the hidden layers and softmax as activation function in the output layer. Use sgd optimizer with learning rate 0.03.
```
#Initialize Sequential model
model = tf.keras.models.Sequential()
#Reshape data from 2D to 1D -> 28x28 to 784
model.add(tf.keras.layers.Reshape((784,),input_shape=(28,28,)))
#Normalize the data
model.add(tf.keras.layers.BatchNormalization())
# Two hidden Dense layers with 100 neurons each and sigmoid activation
model.add(tf.keras.layers.Dense(100, activation='sigmoid'))
model.add(tf.keras.layers.Dense(100, activation='sigmoid'))
# Output Dense layer which provides 10 outputs after applying softmax
model.add(tf.keras.layers.Dense(10, activation='softmax'))
# Compile the model
#Create optimizer with the required learning rate (0.03)
sgd_optimizer = tf.keras.optimizers.SGD(lr=0.03)
model.compile(optimizer=sgd_optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
```
## Review model
```
model.summary()
```
### Run the model
```
model.fit(trainX,trainY,
          validation_data=(testX,testY),
          epochs=5,
          batch_size=32)
```
# Section 0 Problem Description and Project Workflow
## 1. Problem Description
<img src="default.png" width="20%"></img>
In this project, you will use a reinforcement learning algorithm to implement a robot that automatically finds its way through a maze.
1. As shown above, the robot is displayed in the upper-right corner. Our maze contains two kinds of special cells: traps (red bombs) and the goal (the blue target). The robot should avoid the traps and reach the goal as quickly as possible.
2. The robot can take four actions: move up `u`, move right `r`, move down `d`, or move left `l`.
3. After taking an action, the robot receives a different reward depending on the outcome. Specifically:
    - hitting a wall: -10
    - reaching the goal: 50
    - stepping into a trap: -30
    - any other move: -0.1
4. We need to modify the code in `robot.py` to implement a Q-Learning robot that achieves the goal described above.
## 2. Project Workflow
1. Set up the environment. Use the `envirnment.yml` file to create a conda environment named `robot-env`. Specifically, change into the current directory and run the following command in a command line/terminal, then wait a moment for it to finish.
```
conda env create -f envirnment.yml
```
Once installation is complete, run `source activate robot-env` (Mac/Linux) or `activate robot-env` (Windows) in the command line/terminal to activate the environment.
2. Follow the instructions in `main.ipynb` to complete the project, modifying the corresponding code as directed and generating and inspecting the results.
3. Export the code and the report, upload the files, submit them for review, and iterate.
---
---
# Section 1 Understanding the Algorithm
## 1.1 Reinforcement Learning Overview
As a branch of machine learning, reinforcement learning also lets an agent gain "experience" through "training" in order to accomplish a given task. Unlike supervised and unsupervised learning, however, the reinforcement learning framework emphasizes learning through the agent's **interaction** with the environment. In supervised and unsupervised learning, the agent usually learns from a given training set, guided by a fixed objective (such as minimizing a loss function) and a given learning algorithm. In reinforcement learning, the agent instead learns from the rewards it obtains by interacting with the environment. The environment can be virtual (such as a virtual maze) or real (a self-driving car collecting data on real roads).
Reinforcement learning has five core components: the **Environment**, the **Agent**, the **State**, the **Action**, and the **Reward**. At a given time step $t$:
- the agent perceives its current state $s_t$ in the environment
- the agent chooses an action $a_t$ according to some rule
- the environment returns a reward $r_{t+1}$ to the agent based on the chosen action
With a suitable learning algorithm, the agent will, under this setup, learn a policy $\pi (s_t) = a_t$ that selects action $a_t$ in state $s_t$.
---
**Question 1**: Following the definitions above, describe the real-world counterpart of each of the five reinforcement learning components in the "robot navigating a maze" problem:
- **Environment**: the maze environment
- **State**: hitting a wall, stepping into a trap, reaching the goal, or any other situation
- **Action**: moving up, down, left, or right
- **Reward**: -30, -10, -0.1, +50
$$T(s^{'}, a, s) = P(s^{'}|a,s)$$
---
## 1.2 Computing Q Values
In this project we implement a reinforcement learning algorithm based on Q-Learning. Q-Learning is a value iteration algorithm. Unlike policy iteration algorithms, a value iteration algorithm computes the value (or utility) of every "state" or "state-action" pair and then, when acting, tries to maximize that value. Accurately estimating these values is therefore the core of the value iteration algorithm. We usually aim to **maximize the long-term reward of an action**, i.e. we consider not only the immediate reward the action brings, but also its long-run payoff.
In the Q-Learning algorithm, we denote this long-term reward as the Q value and consider the Q value of every "state-action" pair. Concretely, it is computed as:
$$
q(s_{t},a) = R_{t+1} + \gamma \times\max_a q(a,s_{t+1})
$$
That is, for the current state-action pair $(s_{t},a)$, we consider the reward $R_{t+1}$ the environment returns after taking action $a$, plus the maximum Q value $\max_a q(a,s_{t+1})$ obtainable by taking any action after reaching $s_{t+1}$, discounted by the factor $\gamma$.
In practice, however, we usually update the Q table more conservatively: we introduce a relaxation factor $\alpha$ and apply the update below, which makes the Q table change more smoothly.
$$
q(s_{t},a) = (1-\alpha) \times q(s_{t},a) + \alpha \times(R_{t+1} + \gamma \times\max_a q(a,s_{t+1}))
$$
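Written as code, this conservative update is just a few lines (a sketch only; the real implementation belongs in the `#TODO` sections of `Robot.py`, and the nested-dict `q_table` layout is an assumption):
```
def update_q(q_table, state, action, reward, next_state, alpha=0.5, gamma=0.9):
    """Soft Q update: q <- (1 - alpha) * q + alpha * (r + gamma * max_a' q(s', a'))."""
    target = reward + gamma * max(q_table[next_state].values())
    q_table[state][action] = (1 - alpha) * q_table[state][action] + alpha * target
```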
---
<img src="default2.png" width="20%"></img>
**Question 2**: Using the information below, compute $q(s_{t},a)$ by filling the corresponding numbers into the template.
Given: as shown above, the robot is at $s_1$ and takes action `u`; the reward for the action follows the default settings of this project. The Q values of the actions in $s_2$ are `u`: -24, `r`: -13, `d`: -0.29, `l`: +40, and $\gamma$ is 0.9.
$$
\begin{align}
q(s_{t},a) & = R_{t+1} + \gamma \times\max_a q(a,s_{t+1}) \\
 & =(-0.1) + (0.9)*(40) \\
 & =(35.9)
\end{align}
$$
---
## 1.3 Choosing Actions
The "exploration-exploitation" trade-off is a very important issue in reinforcement learning. Specifically, following the definitions above, we want the robot to choose the optimal decision at every step in order to maximize the long-term reward. Doing so, however, has the following drawbacks:
1. Early in training the Q values are inaccurate; if we always act according to them at this stage, we will make mistakes.
2. After training for a while, the robot's route becomes relatively fixed, so it can no longer explore the environment effectively.
We therefore need a way to address these issues and increase the robot's exploration. We use the epsilon-greedy algorithm: when the robot chooses an action, with some probability it picks a random action, and otherwise it picks the action with the best Q value. This probability of picking a random action should gradually decrease as training progresses.
---
**Question 3**: Implement the epsilon-greedy logic in the code block below and run the test code.
```
import random
import numpy as np
actions = ['u','r','d','l']
qline = {'u':1.2, 'r':-2.1, 'd':-24.5, 'l':27}
epsilon = 0.3 # choose a random action with probability 0.3
def choose_action(actions,qline,epsilon):
    action = None
    if random.random()<epsilon: # with some probability
        action=np.random.choice(actions) # choose a random action
    else:
        action=max(qline,key=qline.get) # otherwise choose the action with the largest Q value
return action
action=choose_action(actions,qline,epsilon)
print(action)
```
---
---
# Section 2 Code Implementation
## 2.1. Understanding the `Maze` Class
First we introduce the maze class `Maze`. It is a very handy class that can randomly generate a maze to your specification, or load a maze map from a given file.
1. Use `Maze("file_name")` to create a maze from a file, or `Maze(maze_size=(height,width))` to generate a random maze.
2. Use the `trap_number` parameter to set the number of traps when creating the maze.
3. Enter the maze variable's name on its own and press Enter to display the maze image (e.g. if `g=Maze("xx.txt")`, simply enter `g`).
4. For generated mazes, a height between 6 and 12 and a width between 10 and 12 are recommended.
---
**Question 4**: Create and display your maze in the code block below.
```
from Maze import Maze
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
## todo: create and display the maze
mario=Maze(maze_size=(8,12),trap_number=2)
mario
```
---
You may have noticed that a robot is already placed in the maze by default. In fact, the maze provides APIs to help the robot move and perceive its surroundings. The two APIs you will use next are `maze.sense_robot()` and `maze.move_robot()`.
1. `maze.sense_robot()` takes no parameters and returns the robot's current position in the maze.
2. `maze.move_robot(direction)` moves the robot in the given direction and returns the reward for that action.
---
**Question 5**: Move the robot randomly, record the rewards obtained, and show the robot's final position.
```
rewards = []
## move the robot randomly 10 times and record the rewards
for i in range(10):
    action=np.random.choice(actions)
    rewards.append(mario.move_robot(action))
## print the robot's final position
print(mario.sense_robot())
print(rewards)
## display the maze to inspect the robot's position
mario
```
## 2.2. Implementing the `Robot` Class
The `Robot` class is the core piece we need to implement. In this class we implement the functionality needed to build a successful reinforcement learning agent. In short, earlier we moved the robot around the environment by hand; once the `Robot` class is implemented, the robot will move on its own. By implementing the learning functions, the `Robot` class learns how to choose the optimal action and how to update the corresponding reinforcement learning parameters.
`Robot` takes several inputs. `alpha=0.5, gamma=0.9, epsilon0=0.5` are the default values of the reinforcement learning parameters you have already seen, and `Maze` should be the maze object the robot lives in.
Next, look at the `Robot.update` function, which specifies the procedure the robot runs every time it takes an action. From this procedure the purpose of each function becomes clear.
Finally, you need to implement the 8 code segments in `Robot.py`; they are all marked with `#TODO`, so you can find them easily.
---
**Question 6**: Implement the 8 code segments in `Robot.py`, then run the following code to check the result (remember to change the `maze` variable to the name of the maze you created).
```
from Robot import Robot
robot = Robot(mario) # remember to change maze to the name of your maze variable
robot.set_status(learning=True,testing=False)
print(robot.update())
mario
```
---
## 2.3 Training the Robot with the `Runner` Class
Having implemented the above, we can start training our `Robot` and tuning its parameters. We have prepared another very handy class, `Runner`, which carries out the whole training process and its visualization. With the code below you can train the robot, and a video named `filename` will be generated in the current folder, recording the entire training process. By watching it you can spot problems during training and optimize your code and parameters.
---
**Question 7**: Try training the robot with the code below and tune the parameters. The tunable parameters include:
- Training parameters
    - number of training epochs `epoch`
- Robot parameters:
    - `epsilon0` (the initial epsilon)
    - the `epsilon` decay (it can be linear or exponential, and you can adjust the decay speed); you adjust this in Robot.py (a possible schedule is sketched after the training code below)
    - `alpha`
    - `gamma`
- Maze parameters:
    - maze size
    - number of traps in the maze
```
## tunable parameters:
epoch = 500
epsilon0 = 0.05
alpha = 0.5
gamma = 0.5
maze_size = (8,12)
trap_number = 2
from Runner import Runner
g = Maze(maze_size=maze_size,trap_number=trap_number)
r = Robot(g,alpha=alpha, epsilon0=epsilon0, gamma=gamma)
r.set_status(learning=True)
runner = Runner(r, g)
runner.run_training(epoch, display_direction=True)
#runner.generate_movie(filename = "final1.mp4") # commenting this line out speeds up the run, but you will not get the video.
```
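One possible epsilon decay schedule, for illustration only (the actual decay logic lives in your `Robot.py`, and the function name and decay rate here are assumptions):
```
import numpy as np

# Exponential decay: the exploration probability shrinks towards 0 as training proceeds
def decayed_epsilon(epsilon0, t, decay_rate=0.01):
    return epsilon0 * np.exp(-decay_rate * t)

print([round(decayed_epsilon(0.5, t), 3) for t in (0, 50, 100, 200)])
```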
---
Using the `runner.plot_results()` function, you can print some statistics about the robot collected during training.
- Success Times is the cumulative number of times the robot succeeded during training; this should be a cumulatively increasing curve.
- Accumulated Rewards is the cumulative reward the robot obtained in each training epoch; this should be a gradually increasing curve.
- Running Times per Epoch is the number of steps the robot takes in each training epoch (an epoch ends once the robot reaches the goal, and the next one begins); this should be a gradually decreasing curve.
---
**Question 8**: Use `runner.plot_results()` to output the training results, and analyse your robot based on them.
- State which parameters you chose and why.
- You are encouraged to compare how the robot trains under different parameter settings.
- Are you satisfied with the training results? What improvements do you plan?
```
runner.plot_results()
epoch = 500
epsilon0 = 0.05
#alpha = 0.5
gamma = 0.5
maze_size = (8,12)
trap_number = 2
for alpha in np.linspace(0.1,0.9,9):
g = Maze(maze_size=maze_size,trap_number=trap_number)
r = Robot(g,alpha=alpha, epsilon0=epsilon0, gamma=gamma)
r.set_status(learning=True)
runner = Runner(r, g)
runner.run_training(epoch, display_direction=True)
runner.plot_results()
```
(Your answer here)
---
**Question 9:** Package the following files and submit them to the reviewer.
- `robot.py`
- `robot_maze.ipynb`
- the `robot_maze.html` exported from `robot_maze.ipynb`
# Hybrid quantum-classical Neural Networks with PyTorch and Qiskit
Machine learning (ML) has established itself as a successful interdisciplinary field which seeks to mathematically extract generalizable information from data. Throwing in quantum computing gives rise to interesting areas of research which seek to leverage the principles of quantum mechanics to augment machine learning or vice-versa. Whether you're aiming to enhance classical ML algorithms by outsourcing difficult calculations to a quantum computer or optimise quantum algorithms using classical ML architectures - both fall under the diverse umbrella of quantum machine learning (QML).
In this chapter, we explore how a classical neural network can be partially quantized to create a hybrid quantum-classical neural network. We will code up a simple example that integrates **Qiskit** with a state-of-the-art open-source software package - **[PyTorch](https://pytorch.org/)**. The purpose of this example is to demonstrate the ease of integrating Qiskit with existing ML tools and to encourage ML practitioners to explore what is possible with quantum computing.
## Contents
1. [How Does it Work?](#how)
1.1 [Preliminaries](#prelims)
2. [So How Does Quantum Enter the Picture?](#quantumlayer)
3. [Let's code!](#code)
3.1 [Imports](#imports)
3.2 [Create a "Quantum Class" with Qiskit](#q-class)
3.3 [Create a "Quantum-Classical Class" with PyTorch](#qc-class)
3.4 [Data Loading and Preprocessing](#data-loading-preprocessing)
3.5 [Creating the Hybrid Neural Network](#hybrid-nn)
3.6 [Training the Network](#training)
3.7 [Testing the Network](#testing)
4. [What Now?](#what-now)
## 1. How does it work? <a id='how'></a>
<img src="hybridnetwork.png" />
**Fig.1** Illustrates the framework we will construct in this chapter. Ultimately, we will create a hybrid quantum-classical neural network that seeks to classify hand drawn digits. Note that the edges shown in this image are all directed downward; however, the directionality is not visually indicated.
### 1.1 Preliminaries <a id='prelims'></a>
The background presented here on classical neural networks is included to establish relevant ideas and shared terminology; however, it is still extremely high-level. __If you'd like to dive one step deeper into classical neural networks, see the well made video series by youtuber__ [3Blue1Brown](https://youtu.be/aircAruvnKk). Alternatively, if you are already familiar with classical networks, you can [skip to the next section](#quantumlayer).
###### Neurons and Weights
A neural network is ultimately just an elaborate function that is built by composing smaller building blocks called neurons. A ***neuron*** is typically a simple, easy-to-compute, and nonlinear function that maps one or more inputs to a single real number. The single output of a neuron is typically copied and fed as input into other neurons. Graphically, we represent neurons as nodes in a graph and we draw directed edges between nodes to indicate how the output of one neuron will be used as input to other neurons. It's also important to note that each edge in our graph is often associated with a scalar-value called a [***weight***](https://en.wikipedia.org/wiki/Artificial_neural_network#Connections_and_weights). The idea here is that each of the inputs to a neuron will be multiplied by a different scalar before being collected and processed into a single value. The objective when training a neural network consists primarily of choosing our weights such that the network behaves in a particular way.
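As a purely illustrative aside, a single neuron with three inputs can be written in a few lines (a sketch; the weights and activation chosen here are arbitrary):
```
import numpy as np

def neuron(x, w, b):
    """One neuron: weighted sum of the inputs followed by a nonlinear activation (here a sigmoid)."""
    return 1.0 / (1.0 + np.exp(-(np.dot(w, x) + b)))

x = np.array([0.2, -1.0, 0.5])    # inputs copied from the previous layer
w = np.array([0.7,  0.1, -0.4])   # one weight per incoming edge
print(neuron(x, w, b=0.05))
```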
###### Feed Forward Neural Networks
It is also worth noting that the particular type of neural network we will concern ourselves with is called a **[feed-forward neural network (FFNN)](https://en.wikipedia.org/wiki/Feedforward_neural_network)**. This means that as data flows through our neural network, it will never return to a neuron it has already visited. Equivalently, you could say that the graph which describes our neural network is a **[directed acyclic graph (DAG)](https://en.wikipedia.org/wiki/Directed_acyclic_graph)**. Furthermore, we will stipulate that neurons within the same layer of our neural network will not have edges between them.
###### IO Structure of Layers
The input to a neural network is a classical (real-valued) vector. Each component of the input vector is multiplied by a different weight and fed into a layer of neurons according to the graph structure of the network. After each neuron in the layer has been evaluated, the results are collected into a new vector where the i'th component records the output of the i'th neuron. This new vector can then be treated as an input for a new layer, and so on. We will use the standard term ***hidden layer*** to describe all but the first and last layers of our network.
## 2. So How Does Quantum Enter the Picture? <a id='quantumlayer'> </a>
To create a quantum-classical neural network, one can implement a hidden layer for our neural network using a parameterized quantum circuit. By "parameterized quantum circuit", we mean a quantum circuit where the rotation angles for each gate are specified by the components of a classical input vector. The outputs from our neural network's previous layer will be collected and used as the inputs for our parameterized circuit. The measurement statistics of our quantum circuit can then be collected and used as inputs for the following layer. A simple example is depicted below:
<img src="neuralnetworkQC.png" />
Here, $\sigma$ is a [nonlinear function](https://en.wikipedia.org/wiki/Activation_function) and $h_i$ is the value of neuron $i$ at each hidden layer. $R(h_i)$ represents any rotation gate about an angle equal to $h_i$ and $y$ is the final prediction value generated from the hybrid network.
### What about backpropagation?
If you're familiar with classical ML, you may immediately be wondering *how do we calculate gradients when quantum circuits are involved?* This would be necessary to enlist powerful optimisation techniques such as **[gradient descent](https://en.wikipedia.org/wiki/Gradient_descent)**. It gets a bit technical, but in short, we can view a quantum circuit as a black box and the gradient of this black box with respect to its parameters can be calculated as follows:
<img src="quantumgradient.png" />
where $\theta$ represents the parameters of the quantum circuit and $s$ is a macroscopic shift. The gradient is then simply the difference between our quantum circuit evaluated at $\theta+s$ and $\theta - s$. Thus, we can systematically differentiate our quantum circuit as part of a larger backpropagation routine. This closed form rule for calculating the gradient of quantum circuit parameters is known as **[the parameter shift rule](https://arxiv.org/pdf/1905.13311.pdf)**.
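In symbols, the difference described above can be written as (using $QC(\theta)$ for the circuit's measured expectation value, a notation chosen here for convenience):

$$\nabla_{\theta}\,QC(\theta) = QC(\theta + s) - QC(\theta - s)$$

so each parameter costs two additional circuit evaluations, one shifted up by $s$ and one shifted down.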
```
# install packages needed for this specific page
!pip install torchvision
```
## 3. Let's code! <a id='code'></a>
### 3.1 Imports <a id='imports'></a>
First, we import some handy packages that we will need, including Qiskit and PyTorch.
```
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
```
### 3.2 Create a "Quantum Class" with Qiskit <a id='q-class'></a>
We can conveniently put our Qiskit quantum functions into a class. First, we specify how many trainable quantum parameters and how many shots we wish to use in our quantum circuit. In this example, we will keep it simple and use a 1-qubit circuit with one trainable quantum parameter $\theta$. We hard code the circuit for simplicity and use a $RY-$rotation by the angle $\theta$ to train the output of our circuit. The circuit looks like this:
<img src="1qubitcirc.png" width="400"/>
In order to measure the output in the $z-$basis, we calculate the $\sigma_\mathbf{z}$ expectation.
$$\sigma_\mathbf{z} = \sum_i z_i p(z_i)$$
We will see later how this all ties into the hybrid neural network.
```
class QuantumCircuit:
"""
This class provides a simple interface for interaction
with the quantum circuit
"""
def __init__(self, n_qubits, backend, shots):
# --- Circuit definition ---
self._circuit = qiskit.QuantumCircuit(n_qubits)
all_qubits = [i for i in range(n_qubits)]
self.theta = qiskit.circuit.Parameter('theta')
self._circuit.h(all_qubits)
self._circuit.barrier()
self._circuit.ry(self.theta, all_qubits)
self._circuit.measure_all()
# ---------------------------
self.backend = backend
self.shots = shots
def run(self, thetas):
t_qc = transpile(self._circuit,
self.backend)
qobj = assemble(t_qc,
shots=self.shots,
parameter_binds = [{self.theta: theta} for theta in thetas])
job = self.backend.run(qobj)
result = job.result().get_counts(self._circuit)
counts = np.array(list(result.values()))
states = np.array(list(result.keys())).astype(float)
# Compute probabilities for each state
probabilities = counts / self.shots
# Get state expectation
expectation = np.sum(states * probabilities)
return np.array([expectation])
```
Let's test the implementation
```
simulator = qiskit.Aer.get_backend('qasm_simulator')
circuit = QuantumCircuit(1, simulator, 100)
print('Expected value for rotation pi {}'.format(circuit.run([np.pi])[0]))
circuit._circuit.draw()
```
### 3.3 Create a "Quantum-Classical Class" with PyTorch <a id='qc-class'></a>
Now that our quantum circuit is defined, we can create the functions needed for backpropagation using PyTorch. [The forward and backward passes](http://www.ai.mit.edu/courses/6.034b/backprops.pdf) contain elements from our Qiskit class. The backward pass directly computes the analytical gradients using the finite difference formula we introduced above.
```
class HybridFunction(Function):
""" Hybrid quantum - classical function definition """
@staticmethod
def forward(ctx, input, quantum_circuit, shift):
""" Forward pass computation """
ctx.shift = shift
ctx.quantum_circuit = quantum_circuit
expectation_z = ctx.quantum_circuit.run(input[0].tolist())
result = torch.tensor([expectation_z])
ctx.save_for_backward(input, result)
return result
@staticmethod
def backward(ctx, grad_output):
""" Backward pass computation """
input, expectation_z = ctx.saved_tensors
input_list = np.array(input.tolist())
shift_right = input_list + np.ones(input_list.shape) * ctx.shift
shift_left = input_list - np.ones(input_list.shape) * ctx.shift
gradients = []
for i in range(len(input_list)):
expectation_right = ctx.quantum_circuit.run(shift_right[i])
expectation_left = ctx.quantum_circuit.run(shift_left[i])
gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
gradients.append(gradient)
gradients = np.array([gradients]).T
return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
""" Hybrid quantum - classical layer definition """
def __init__(self, backend, shots, shift):
super(Hybrid, self).__init__()
self.quantum_circuit = QuantumCircuit(1, backend, shots)
self.shift = shift
def forward(self, input):
return HybridFunction.apply(input, self.quantum_circuit, self.shift)
```
### 3.4 Data Loading and Preprocessing <a id='data-loading-preprocessing'></a>
##### Putting this all together:
We will create a simple hybrid neural network to classify images of two types of digits (0 or 1) from the [MNIST dataset](http://yann.lecun.com/exdb/mnist/). We first load MNIST and filter for pictures containing 0's and 1's. These will serve as inputs for our neural network to classify.
#### Training data
```
# Concentrating on the first 100 samples
n_samples = 100
X_train = datasets.MNIST(root='./data', train=True, download=True,
transform=transforms.Compose([transforms.ToTensor()]))
# Leaving only labels 0 and 1
idx = np.append(np.where(X_train.targets == 0)[0][:n_samples],
np.where(X_train.targets == 1)[0][:n_samples])
X_train.data = X_train.data[idx]
X_train.targets = X_train.targets[idx]
train_loader = torch.utils.data.DataLoader(X_train, batch_size=1, shuffle=True)
n_samples_show = 6
data_iter = iter(train_loader)
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 3))
while n_samples_show > 0:
images, targets = data_iter.__next__()
axes[n_samples_show - 1].imshow(images[0].numpy().squeeze(), cmap='gray')
axes[n_samples_show - 1].set_xticks([])
axes[n_samples_show - 1].set_yticks([])
axes[n_samples_show - 1].set_title("Labeled: {}".format(targets.item()))
n_samples_show -= 1
```
#### Testing data
```
n_samples = 50
X_test = datasets.MNIST(root='./data', train=False, download=True,
transform=transforms.Compose([transforms.ToTensor()]))
idx = np.append(np.where(X_test.targets == 0)[0][:n_samples],
np.where(X_test.targets == 1)[0][:n_samples])
X_test.data = X_test.data[idx]
X_test.targets = X_test.targets[idx]
test_loader = torch.utils.data.DataLoader(X_test, batch_size=1, shuffle=True)
```
So far, we have loaded the data and coded a class that creates our quantum circuit which contains 1 trainable parameter. This quantum parameter will be inserted into a classical neural network along with the other classical parameters to form the hybrid neural network. We also created backward and forward pass functions that allow us to do backpropagation and optimise our neural network. Lastly, we need to specify our neural network architecture such that we can begin to train our parameters using optimisation techniques provided by PyTorch.
### 3.5 Creating the Hybrid Neural Network <a id='hybrid-nn'></a>
We can use a neat PyTorch pipeline to create a neural network architecture. The network will need to be compatible in terms of its dimensionality when we insert the quantum layer (i.e. our quantum circuit). Since our quantum circuit in this example contains 1 parameter, we must ensure the network condenses the neurons down to size 1. We create a typical Convolutional Neural Network with two fully-connected layers at the end. The value of the last neuron of the fully-connected layer is fed as the parameter $\theta$ into our quantum circuit. The circuit measurement then serves as the final prediction for 0 or 1, as provided by a $\sigma_z$ measurement.
```
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 6, kernel_size=5)
self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
self.dropout = nn.Dropout2d()
self.fc1 = nn.Linear(256, 64)
self.fc2 = nn.Linear(64, 1)
self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = self.dropout(x)
x = x.view(1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = self.hybrid(x)
return torch.cat((x, 1 - x), -1)
```
### 3.6 Training the Network <a id='training'></a>
We now have all the ingredients to train our hybrid network! We can specify any [PyTorch optimiser](https://pytorch.org/docs/stable/optim.html), [learning rate](https://en.wikipedia.org/wiki/Learning_rate) and [cost/loss function](https://en.wikipedia.org/wiki/Loss_function) in order to train over multiple epochs. In this instance, we use the [Adam optimiser](https://arxiv.org/abs/1412.6980), a learning rate of 0.001 and the [negative log-likelihood loss function](https://pytorch.org/docs/stable/_modules/torch/nn/modules/loss.html).
```
model = Net()
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.NLLLoss()
epochs = 20
loss_list = []
model.train()
for epoch in range(epochs):
total_loss = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
# Forward pass
output = model(data)
# Calculating loss
loss = loss_func(output, target)
# Backward pass
loss.backward()
# Optimize the weights
optimizer.step()
total_loss.append(loss.item())
loss_list.append(sum(total_loss)/len(total_loss))
print('Training [{:.0f}%]\tLoss: {:.4f}'.format(
100. * (epoch + 1) / epochs, loss_list[-1]))
```
Plot the training graph
```
plt.plot(loss_list)
plt.title('Hybrid NN Training Convergence')
plt.xlabel('Training Iterations')
plt.ylabel('Neg Log Likelihood Loss')
```
### 3.7 Testing the Network <a id='testing'></a>
```
model.eval()
with torch.no_grad():
    correct = 0
    total_loss = []  # reset here so the reported test loss is not mixed with the training losses
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
loss = loss_func(output, target)
total_loss.append(loss.item())
print('Performance on test data:\n\tLoss: {:.4f}\n\tAccuracy: {:.1f}%'.format(
sum(total_loss) / len(total_loss),
correct / len(test_loader) * 100)
)
n_samples_show = 6
count = 0
fig, axes = plt.subplots(nrows=1, ncols=n_samples_show, figsize=(10, 3))
model.eval()
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
if count == n_samples_show:
break
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
axes[count].imshow(data[0].numpy().squeeze(), cmap='gray')
axes[count].set_xticks([])
axes[count].set_yticks([])
axes[count].set_title('Predicted {}'.format(pred.item()))
count += 1
```
## 4. What Now? <a id='what-now'></a>
#### While it is totally possible to create hybrid neural networks, does this actually have any benefit?
The classical layers of this network train perfectly well (in fact, better) without the quantum layer. Furthermore, you may have noticed that the quantum layer we trained here **generates no entanglement** and will therefore remain classically simulatable as we scale up this particular architecture. This means that if you hope to achieve a quantum advantage using hybrid neural networks, you'll need to start by extending this code to include a more sophisticated quantum layer.
The point of this exercise was to get you thinking about integrating techniques from ML and quantum computing in order to investigate whether there is indeed some element of interest - and thanks to PyTorch and Qiskit, this becomes a little bit easier.
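If you want to experiment with a quantum layer that is not trivially classical, a natural first step is a multi-qubit parameterised circuit with an entangling gate. The sketch below is illustrative only: the class name, the two-parameter interface and the $\langle Z \rangle$ estimator are our own choices rather than part of this tutorial's code, and you would still need to adapt the forward/backward hybrid machinery above to handle more than one parameter.
```
# Illustrative sketch: a 2-qubit parameterised circuit that does generate entanglement.
import numpy as np
import qiskit
from qiskit import transpile, assemble
from qiskit.circuit import Parameter

class EntanglingCircuit:
    def __init__(self, backend, shots):
        self.theta = Parameter('theta')
        self.phi = Parameter('phi')
        self._circuit = qiskit.QuantumCircuit(2)
        self._circuit.h([0, 1])
        self._circuit.ry(self.theta, 0)
        self._circuit.ry(self.phi, 1)
        self._circuit.cx(0, 1)  # entangling gate
        self._circuit.measure_all()
        self.backend = backend
        self.shots = shots

    def run(self, theta_val, phi_val):
        t_qc = transpile(self._circuit, self.backend)
        qobj = assemble(t_qc, shots=self.shots,
                        parameter_binds=[{self.theta: theta_val, self.phi: phi_val}])
        counts = self.backend.run(qobj).result().get_counts()
        # expectation of Z on qubit 0 (rightmost bit in Qiskit's bit ordering)
        exp_z = sum(((-1) ** int(b[-1])) * c for b, c in counts.items()) / self.shots
        return np.array([exp_z])
```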
```
import qiskit
qiskit.__qiskit_version__
```
---
```
import numpy as np
import os
import pandas as pd
path = os.getcwd()
dataset_path = path + '\\dataset'
dataset_path
labels = ['环境', '计算机', '交通', '教育', '经济', '军事', '体育', '医药', '艺术', '政治']  # environment, computer, transport, education, economy, military, sports, medicine, art, politics
# Load the training set and tokenize it with jieba
import jieba
def load_dataset():
folders = os.listdir(dataset_path)
data_list = []
class_list = []
level = 0
for folder in folders:
files = os.listdir(dataset_path + '\\' + folder)
txt = ""
for file in files:
with open(dataset_path + '\\' + folder + '\\' + file, encoding='ANSI', errors='ignore') as f:
lines = f.readlines()
for line in lines:
txt += line.strip()
f.close()
txt += '。'
words = jieba.lcut(txt)
word_list = [t for t in words if t not in stopwords and len(t) > 1 and len(t) < 8]
data_list.append(word_list)
class_list.append(level)
level += 1
return data_list, class_list
# Read the stopword list
def load_stop():
f = open('stopword.txt', encoding='utf-8')
stopwords = f.read().split('\n')
f.close()
return stopwords
stopwords = load_stop()
len(stopwords)
data_list, class_list = load_dataset()
len(data_list), len(class_list)
# Build the vocabulary: simply deduplicate all words in the training data and use them as the vocabulary
def createVocabList(dataSet):
vocabSet = set([])
for document in dataSet:
vocabSet = vocabSet | set(document)
return list(vocabSet)
myVocabList = createVocabList(data_list)
len(myVocabList)
# Document word vectors: each vocabulary word is a feature; 1 if the word appears in the document, 0 otherwise
def trainNB(vocabList):
returnVec = []
pclass = []
word_sum = 0
    word_total = []  # all tokens in the training set (with duplicates)
for data in data_list:
pclass.append(len(data))
word_sum += len(data)
word_total += data
for i, val in enumerate(pclass):
pclass[i] = val*1.0 / word_sum
word_sum_vocabList = len(vocabList)
for word in vocabList:
Vec = []
        word_cnt_total = word_total.count(word)  # total occurrences of the term across the whole training set
for data in data_list:
            word_cnt = data.count(word)  # occurrences of the term within this class
Vec.append( (word_cnt + 1.0) / (word_cnt_total + word_sum_vocabList) )
returnVec.append(Vec)
return pclass, returnVec
pclass, pwordvec = trainNB(myVocabList)
print(pclass)
print(pwordvec[0])
def load_test():
test_path = path + '\\testdata'
files = os.listdir(test_path)
test_data_list = []
for file in files:
txt = ''
with open(test_path + '\\' + file, encoding='ANSI', errors='ignore') as f:
lines = f.readlines()
for line in lines:
txt += line.strip()
f.close()
words = jieba.lcut(txt)
word_list = [t for t in words if t not in stopwords and len(t) > 1 and len(t) < 8]
test_data_list.append(word_list)
return files, test_data_list
test_names, test_data_list = load_test()
pclass = [i * 100 for i in pclass]  # rescale the class priors to delay floating-point underflow in the repeated products below
pclass
def predict(test_list):
    pvec = list(pclass)  # copy, otherwise the shared priors are mutated across test documents
for test_word in test_list:
if test_word not in myVocabList:
continue
word_index = myVocabList.index(test_word)
for i in range(10):
            pvec[i] *= pwordvec[word_index][i] * 10  # the extra x10 also delays underflow
return pvec
predict_vec = []
predict_class = []
for test_data in test_data_list:
pvec = predict(test_data)
predict_vec.append(pvec)
predict_class.append(pvec.index(max(pvec)))
predict_vec[:2], predict_class[:2]
predict_ans = pd.DataFrame({'name': test_names, 'predict_class': predict_class})
predict_ans.head()
predict_ans.describe()
predict_ans['predict_class'].unique()
```
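The `predict` step above multiplies many small probabilities and leans on the ad-hoc x100/x10 rescaling to hold off underflow. A more robust alternative is to accumulate log-probabilities instead; the sketch below (the function name is ours, and it reuses `pclass`, `pwordvec` and `myVocabList` from the cells above) is one way to do that.
```
import numpy as np

def predict_log(test_list):
    # accumulate log-probabilities instead of multiplying raw probabilities
    logp = np.log(np.array(pclass, dtype=float))
    for test_word in test_list:
        if test_word not in myVocabList:
            continue
        word_index = myVocabList.index(test_word)
        logp += np.log(np.array(pwordvec[word_index], dtype=float))
    return int(np.argmax(logp))  # index of the most probable class
```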
# Example 9: Robust Controlled Invariance or Constrained Quadratic Regulator
The following theorem encodes the conditions for robust controlled invariance using zonotope containment. As a byproduct, we also obtain polytopic control laws and polytopic Lyapunov functions for linear discrete-time systems.
### Theorem
Consider a discrete-time disturbed linear system
\begin{equation}
x^+ \in Ax + Bu \oplus \mathbb{W},
\end{equation}
where $x \in \mathbb{R}^n$ is the state, $u \in \mathbb{R}^{m}$ is the control input, and $\mathbb{W}=\langle W \rangle \subset \mathbb{R}^n$ is the zonotopic disturbance set with generator matrix $W \in \mathbb{R}^{n \times n_w}$. Suppose there exist $\phi \in \mathbb{R}^{n \times q}, \theta \in \mathbb{R}^{m \times q}$, where $q$ is user-defined, such that the following properties hold:
$$
\exists E \in \mathbb{R}^{n \times n_w}, \exists \alpha \in [0,1),
$$
$$
(A \phi + B \theta, W) = (E,\phi),~~ \langle E \rangle \subseteq \alpha
\langle W \rangle.
$$
$$
\mathcal{X} := \frac{1}{1-\alpha} \langle \phi \rangle, \mathcal{U} := \frac{1}{1-\alpha} \langle \theta \rangle.
$$
Then $\mathcal{X}$ is a robust control invariant set under the control law $\pi: \mathcal{X} \rightarrow \mathcal{U}$ given by
$$
\pi(x)=\theta~ \underset{\zeta:\, x=\phi \zeta}{\arg \min}~ \| \zeta \|_\infty.
$$
Moreover, $V : \mathbb{R}^n \rightarrow \mathbb{R}_+$ serves as a Lyapunov function for the undisturbed closed-loop system $Ax+B\pi(x)$, where:
$$
\begin{array}{lll}
V(x)=& \min & \| \zeta \|_\infty \\
& \text{subject to} & x= \phi \zeta.
\end{array}
$$
### Proof
Given $x= \frac{1}{1-\alpha} \phi \zeta, \|\zeta\|_\infty \le 1$, the control policy chooses $u=\frac{1}{1-\alpha} \theta \zeta$. Therefore, $x^+ \in \frac{1}{1-\alpha} \langle A\phi+B\theta \rangle \oplus \mathbb{W}$. We need to show that
$$
\frac{1}{1-\alpha} \langle A\phi+B\theta \rangle \oplus \mathbb{W} \subseteq \frac{1}{1-\alpha} \langle \phi \rangle.
$$
From above we have the following:
$$
\label{eq_rci_proof_main}
\langle A\phi + B \theta \rangle \oplus \mathbb{W} \subseteq \alpha \mathbb{W} \oplus \langle \phi \rangle
$$
To complete the proof, we divide both sides by $(1-\alpha)$ and then remove $\frac{\alpha}{1-\alpha}\mathbb{W}$ from both sides, which is a valid operation by virtue of the properties of support functions. A quick inspection also reveals that the Lyapunov function level sets are scaled copies of $\mathcal{X}$, and that $V$ does not increase along trajectories of the undisturbed closed-loop system $Ax+B\pi(x)$.
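Spelling out that step (a sketch; the only extra assumption is that $\mathbb{W}$ is centered at the origin, so that $\lambda \mathbb{W} \oplus \mu \mathbb{W} = (\lambda+\mu)\mathbb{W}$ for $\lambda,\mu \ge 0$): dividing by $(1-\alpha)$ and using $\frac{1}{1-\alpha}\mathbb{W} = \mathbb{W} \oplus \frac{\alpha}{1-\alpha}\mathbb{W}$ gives
$$
\frac{1}{1-\alpha}\langle A\phi+B\theta \rangle \oplus \mathbb{W} \oplus \frac{\alpha}{1-\alpha}\mathbb{W} \;\subseteq\; \frac{\alpha}{1-\alpha}\mathbb{W} \oplus \frac{1}{1-\alpha}\langle \phi \rangle.
$$
Cancelling $\frac{\alpha}{1-\alpha}\mathbb{W}$ from both sides (support functions are additive over Minkowski sums) leaves
$$
\frac{1}{1-\alpha}\langle A\phi+B\theta \rangle \oplus \mathbb{W} \;\subseteq\; \frac{1}{1-\alpha}\langle \phi \rangle = \mathcal{X},
$$
which is the required containment.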
The theorem above provides a framework to compute constrained robust control invariant sets via subset encodings. Note that $\pi(x)$ is given by a linear program, hence its explicit form is, in general, piecewise linear \cite{bemporad2000piecewise}. Given polyhedral constraints $\mathbb{X}$ and $\mathbb{U}$ on state and control, respectively, we can compute the constrained Quadratic Regulator (QR) with the following quadratic program:
\begin{equation}
\label{eq_quadratic_rci}
\begin{array}{ll}
\min & \text{tr}{(\phi \phi' Q)} + \text{tr}{(\theta \theta' R)} \\
\text{subject to} & (A \phi + B \theta, W) = (E,\phi),\\
& \langle \phi \rangle \subseteq (1-\alpha) \mathbb{X}, \langle \theta \rangle \subseteq (1-\alpha) \mathbb{U},
\\
&\langle E \rangle \subseteq \alpha \langle W \rangle, \\
& 0 \le \alpha \le 1,
\end{array}
\end{equation}
where $Q,R$ are appropriately sized positive definite matrices penalizing the zonotope generators of the states and controls, respectively.
### Example:
Consider a system where $A=I+A_c\delta, B=[\frac{1}{2}\delta^2,\delta]$, where $A_c=[(0,1),(0,1)]$ and $\delta=0.01$, representing a double integrator with negative damping. The constraint sets are $\mathbb{X}=\mathbb{B}_1$ and $\mathbb{U}=20\mathbb{B}_\infty$, and $W=([(1,-1),(1,1)])\times \frac{\delta}{4}$. We solved with $Q=I, R=100 $. The smallest $q$ that makes the program feasible is $13$ - we deliberately selected a disturbance set such that no linear feedback policy $u=Kx$ for any $K$ solves this problem. The sets and sample undisturbed trajectories are shown.
```
import numpy as np
import pypolycontain as pp
import pydrake.solvers.mathematicalprogram as MP
import pydrake.solvers.gurobi as Gurobi_drake
# use Gurobi solver
global gurobi_solver, license
gurobi_solver=Gurobi_drake.GurobiSolver()
license = gurobi_solver.AcquireLicense()
np.random.seed(0)
n=2
A=np.eye(n)+np.random.randint(-5,5,size=(n,n))*1/10
A=np.eye(n)+np.random.randint(-5,5,size=(n,n))*1/10
dt=0.01
A=np.array([[1,dt],[0,1+1*dt]])
B=np.array([[dt**2/2,dt]]).reshape(2,1)
W=pp.zonotope(x=np.zeros((n,1)),G=np.array([[1,-1],[1,1]])*0.25*dt)
# X=pp.zonotope(x=np.zeros((n,1)),G=np.array([[1,0],[-0.5,1]]) ,color='cyan')
H=np.array([[-1,1],\
[1,-1],\
[-1,-1],[1,1]]).reshape(4,2)
h=np.array([1,1,1,1]).reshape(4,1)
X=pp.H_polytope(H,h,color='blue')
U=pp.zonotope(x=np.zeros((1,1)),G=np.eye(1)*20)
# Cost
Q=np.eye(n)
R=np.eye(1)*10000
pp.visualize([X])
prog=MP.MathematicalProgram()
n,m=A.shape[0],B.shape[1]
q=13
program = MP.MathematicalProgram()
phi=program.NewContinuousVariables(n,q,'phi')
theta=program.NewContinuousVariables(m,q,'theta')
alpha=program.NewContinuousVariables(1,'alpha')
program.AddBoundingBoxConstraint(0,1,alpha)
program.AddQuadraticCost(np.trace( np.linalg.multi_dot([phi.T,Q,phi]) ))
program.AddQuadraticCost(np.trace( np.linalg.multi_dot([theta.T,R,theta]) ))
K=np.hstack(( (np.dot(A,phi) + np.dot(B,theta))[:,n:] , W.G ))
program.AddLinearConstraint ( np.equal(K, phi, dtype='object').flatten() )
inbody=pp.zonotope(x=np.zeros((n,1)), G=(np.dot(A,phi)+np.dot(B,theta))[:,0:n])
_W=pp.to_AH_polytope(W)
_W.P.h=_W.P.h*alpha
_X=pp.to_AH_polytope(X)
_X.P.h=_X.P.h*(1-alpha)
_U=pp.to_AH_polytope(U)
_U.P.h=_U.P.h*(1-alpha)
pp.subset(program, inbody,circumbody=_W)
pp.subset(program, pp.zonotope(x=np.zeros((2,1)),G=phi),circumbody=_X)
pp.subset(program, pp.zonotope(x=np.zeros((1,1)),G=theta),circumbody=_U)
result=gurobi_solver.Solve(program,None,None)
if result.is_success():
    print("success")
alpha_n=result.GetSolution(alpha)
phi_n= result.GetSolution(phi)
theta_n= result.GetSolution(theta)
Omega=pp.zonotope(x=np.zeros((2,1)),G=phi_n/(1-alpha_n),color='red')
pp.visualize([X,Omega],title='Robust control invariant set $\mathcal{X}$ (red) \n \
inside safe set $\mathbb{X}$ (blue)',figsize=(6,6),a=0.02)
print("alpha was",alpha_n[0])
else:
print("failure")
def control(phi,theta,x):
program = MP.MathematicalProgram()
n,q=phi.shape
    assert theta.shape[1] == q  # theta must have the same number of generator columns as phi
zeta=program.NewContinuousVariables(q,1,'zeta')
zeta_inf=program.NewContinuousVariables(1,1,'zeta_phi')
program.AddLinearConstraint( np.equal(x, np.dot(phi,zeta), dtype='object').flatten() )
program.AddLinearConstraint( np.greater_equal( zeta_inf, zeta, dtype='object').flatten() )
program.AddLinearConstraint( np.greater_equal( zeta_inf, -zeta, dtype='object').flatten() )
program.AddLinearCost( np.array([1]),np.array([0]), zeta_inf )
result=gurobi_solver.Solve(program,None,None)
if result.is_success():
# print("sucsess")
zeta_n=result.GetSolution(zeta)
# print( result.GetSolution(zeta_inf)[0] )
return np.dot(theta,zeta_n).reshape(1,1),result.GetSolution(zeta_inf)[0]
import matplotlib.pyplot as plt
fig,ax=plt.subplots()
# pp.visualize([pp.zonotope(G=V[t]*phi_n,color=(1-0.8*V[t]/V[0],0,0)) \
# for t in range(T)],ax=ax,fig=fig,\
# title=r'Sample Trajectory and Lyapunov Function Level Sets',a=0.1)
pp.visualize([pp.zonotope(G=p/15*phi_n/(1-alpha_n),color=(1*p/15,0,0)) \
for p in range(15,1,-1)],ax=ax,fig=fig,\
title='Sample Undisturbed Trajectories and \n Lyapunov Function Level Sets',a=0.02)
x0=pp.vcube(q)
fig.set_size_inches(6,6)
N=101
for i in range(N):
x={}
u={}
V={}
T=40
zeta=2*(np.random.random(size=(q,1))-0.5)
zeta=x0[int(i*len(x0)/N),:].reshape(q,1)
x[0]=np.dot(phi_n/(1-alpha_n), zeta/max(np.abs(zeta)))
for t in range(T):
u[t],V[t]=control(phi_n,theta_n,x[t])
x[t+1]=np.dot(A,x[t])+np.dot(B,u[t])
# ax.plot([x[t][0,0] for t in range(T+1)],[x[t][1,0] for t in range(T+1)],'*',color='cyan')
    ax.plot([x[t][0,0] for t in range(1)],[x[t][1,0] for t in range(1)],'*',color='black',markersize=1)
    ax.plot([x[t][0,0] for t in range(T+1)],[x[t][1,0] for t in range(T+1)],'--',color='black',linewidth=1)
K=np.dot( theta_n, np.linalg.pinv(phi_n) )
A_cl=A+B@K
np.linalg.eigvals(A_cl)
K@phi_n-theta_n
```
# GLM: Poisson Regression
## A minimal reproducable example of poisson regression to predict counts using dummy data.
This notebook is basically an excuse to demo Poisson regression using PyMC3, both manually and via the `glm` library, and to demo interactions using the `patsy` library. We will create some dummy data, Poisson distributed according to a linear model, and try to recover the coefficients of that linear model through inference.
For more statistical detail see:
+ Basic info on [Wikipedia](https://en.wikipedia.org/wiki/Poisson_regression)
+ GLMs: Poisson regression, exposure, and overdispersion in Chapter 6.2 of [ARM, Gelman & Hill 2006](http://www.stat.columbia.edu/%7Egelman/arm/)
+ This worked example from ARM 6.2 by [Clay Ford](http://www.clayford.net/statistics/poisson-regression-ch-6-of-gelman-and-hill/)
This very basic model is inspired by [a project by Ian Ozsvald](http://ianozsvald.com/2016/05/07/statistically-solving-sneezes-and-sniffles-a-work-in-progress-report-at-pydatalondon-2016/), which is concerned with understanding the various effects of external environmental factors upon the allergic sneezing of a test subject.
## Contents
+ [Setup](#Setup)
+ [Local Functions](#Local-Functions)
+ [Generate Data](#Generate-Data)
+ [Poisson Regression](#Poisson-Regression)
+ [Create Design Matrices](#Create-Design-Matrices)
+ [Create Model](#Create-Model)
+ [Sample Model](#Sample-Model)
+ [View Diagnostics and Outputs](#View-Diagnostics-and-Outputs)
## Package Requirements (shown as a conda-env YAML):
```
$> less conda_env_pymc3_examples.yml
name: pymc3_examples
channels:
- defaults
dependencies:
- python=3.5
- jupyter
- ipywidgets
- numpy
- scipy
- matplotlib
- pandas
- pytables
- scikit-learn
- statsmodels
- seaborn
- patsy
- requests
- pip
- pip:
- regex
$> conda env create --file conda_env_pymc3_examples.yml
$> source activate pymc3_examples
$> pip install --process-dependency-links git+https://github.com/pymc-devs/pymc3
```
## Setup
```
## Interactive magics
%matplotlib inline
import sys
import warnings
warnings.filterwarnings('ignore')
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import patsy as pt
from scipy import optimize
# pymc3 libraries
import pymc3 as pm
import theano as thno
import theano.tensor as T
sns.set(style="darkgrid", palette="muted")
pd.set_option('display.mpl_style', 'default')
plt.rcParams['figure.figsize'] = 14, 6
np.random.seed(0)
```
## Local Functions
```
def strip_derived_rvs(rvs):
'''Convenience fn: remove PyMC3-generated RVs from a list'''
ret_rvs = []
for rv in rvs:
if not (re.search('_log',rv.name) or re.search('_interval',rv.name)):
ret_rvs.append(rv)
return ret_rvs
def plot_traces_pymc(trcs, varnames=None):
''' Convenience fn: plot traces with overlaid means and values '''
nrows = len(trcs.varnames)
if varnames is not None:
nrows = len(varnames)
ax = pm.traceplot(trcs, varnames=varnames, figsize=(12,nrows*1.4),
lines={k: v['mean'] for k, v in
pm.df_summary(trcs,varnames=varnames).iterrows()})
for i, mn in enumerate(pm.df_summary(trcs, varnames=varnames)['mean']):
ax[i,0].annotate('{:.2f}'.format(mn), xy=(mn,0), xycoords='data',
xytext=(5,10), textcoords='offset points', rotation=90,
va='bottom', fontsize='large', color='#AA0022')
```
## Generate Data
This dummy dataset is created to emulate some data created as part of a quantified-self study; the real data is more complicated than this. Ask Ian Ozsvald if you'd like to know more: https://twitter.com/ianozsvald
### Assumptions:
+ The subject sneezes N times per day, recorded as `nsneeze (int)`
+ The subject may or may not drink alcohol during that day, recorded as `alcohol (boolean)`
+ The subject may or may not take an antihistamine medication during that day, recorded as the negative action `nomeds (boolean)`
+ I postulate (probably incorrectly) that sneezing occurs at some baseline rate, which increases if an antihistamine is not taken, and increases further after alcohol is consumed.
+ The data is aggregated per day to yield a total count of sneezes on that day, with boolean flags for alcohol and antihistamine usage, under the big assumption that these factors have a direct causal relationship with the sneeze count.
**Create 4000 days of data: daily counts of sneezes which are poisson distributed w.r.t alcohol consumption and antihistamine usage**
```
# decide poisson theta values
theta_noalcohol_meds = 1 # no alcohol, took an antihist
theta_alcohol_meds = 3 # alcohol, took an antihist
theta_noalcohol_nomeds = 6 # no alcohol, no antihist
theta_alcohol_nomeds = 36 # alcohol, no antihist
# create samples
q = 1000
df = pd.DataFrame({
'nsneeze': np.concatenate((np.random.poisson(theta_noalcohol_meds, q),
np.random.poisson(theta_alcohol_meds, q),
np.random.poisson(theta_noalcohol_nomeds, q),
np.random.poisson(theta_alcohol_nomeds, q))),
'alcohol': np.concatenate((np.repeat(False, q),
np.repeat(True, q),
np.repeat(False, q),
np.repeat(True, q))),
'nomeds': np.concatenate((np.repeat(False, q),
np.repeat(False, q),
np.repeat(True, q),
np.repeat(True, q)))})
df.tail()
```
##### View means of the various combinations (poisson mean values)
```
df.groupby(['alcohol','nomeds']).mean().unstack()
```
### Briefly Describe Dataset
```
g = sns.factorplot(x='nsneeze', row='nomeds', col='alcohol', data=df,
kind='count', size=4, aspect=1.5)
```
**Observe:**
+ This looks a lot like poisson-distributed count data (because it is)
+ With `nomeds == False` and `alcohol == False` (top-left, aka antihistamines WERE used, alcohol was NOT drunk) the mean of the poisson distribution of sneeze counts is low.
+ Changing `alcohol == True` (top-right) increases the sneeze count `nsneeze` slightly
+ Changing `nomeds == True` (lower-left) increases the sneeze count `nsneeze` further
+ Changing both `alcohol == True and nomeds == True` (lower-right) increases the sneeze count `nsneeze` a lot, increasing both the mean and variance.
---
## Poisson Regression
Our model here is a very simple Poisson regression, allowing for interaction of terms:
$$ \theta = \exp(\beta X) $$
$$ Y_{sneeze\_count} \sim \text{Poisson}(\theta) $$
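For this particular design, the linear predictor inside the exponential expands to (matching the four coefficients used in the manual model further below):
$$ \log \theta = \beta_0 + \beta_1 \cdot \text{alcohol} + \beta_2 \cdot \text{nomeds} + \beta_3 \cdot (\text{alcohol} \times \text{nomeds}) $$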
**Create linear model for interaction of terms**
```
fml = 'nsneeze ~ alcohol + nomeds + alcohol:nomeds'    # full patsy formulation
fml = 'nsneeze ~ alcohol * nomeds' # lazy, alternative patsy formulation
```
### 1. Manual method, create design matrices and manually specify model
**Create Design Matrices**
```
(mx_en, mx_ex) = pt.dmatrices(fml, df, return_type='dataframe', NA_action='raise')
pd.concat((mx_ex.head(3),mx_ex.tail(3)))
```
**Create Model**
```
with pm.Model() as mdl_fish:
# define priors, weakly informative Normal
b0 = pm.Normal('b0_intercept', mu=0, sd=10)
b1 = pm.Normal('b1_alcohol[T.True]', mu=0, sd=10)
b2 = pm.Normal('b2_nomeds[T.True]', mu=0, sd=10)
b3 = pm.Normal('b3_alcohol[T.True]:nomeds[T.True]', mu=0, sd=10)
# define linear model and exp link function
theta = (b0 +
b1 * mx_ex['alcohol[T.True]'] +
b2 * mx_ex['nomeds[T.True]'] +
b3 * mx_ex['alcohol[T.True]:nomeds[T.True]'])
## Define Poisson likelihood
y = pm.Poisson('y', mu=np.exp(theta), observed=mx_en['nsneeze'].values)
```
**Sample Model**
```
with mdl_fish:
trc_fish = pm.sample(2000, tune=1000, cores=4)[1000:]
```
**View Diagnostics**
```
rvs_fish = [rv.name for rv in strip_derived_rvs(mdl_fish.unobserved_RVs)]
plot_traces_pymc(trc_fish, varnames=rvs_fish)
```
**Observe:**
+ The model converges quickly and traceplots looks pretty well mixed
### Transform coeffs and recover theta values
```
np.exp(pm.df_summary(trc_fish, varnames=rvs_fish)[['mean','hpd_2.5','hpd_97.5']])
```
**Observe:**
+ The contributions from each feature as a multiplier of the baseline sneeze count appear to be as per the data generation (a quick numerical check follows this list):
1. exp(b0_intercept): mean=1.02 cr=[0.96, 1.08]
Roughly linear baseline count when no alcohol and meds, as per the generated data:
theta_noalcohol_meds = 1 (as set above)
theta_noalcohol_meds = exp(b0_intercept)
= 1
2. exp(b1_alcohol): mean=2.88 cr=[2.69, 3.09]
non-zero positive effect of adding alcohol, a ~3x multiplier of
baseline sneeze count, as per the generated data:
theta_alcohol_meds = 3 (as set above)
theta_alcohol_meds = exp(b0_intercept + b1_alcohol)
= exp(b0_intercept) * exp(b1_alcohol)
= 1 * 3 = 3
3. exp(b2_nomeds[T.True]): mean=5.76 cr=[5.40, 6.17]
larger, non-zero positive effect of adding nomeds, a ~6x multiplier of
baseline sneeze count, as per the generated data:
theta_noalcohol_nomeds = 6 (as set above)
theta_noalcohol_nomeds = exp(b0_intercept + b2_nomeds)
= exp(b0_intercept) * exp(b2_nomeds)
= 1 * 6 = 6
4. exp(b3_alcohol[T.True]:nomeds[T.True]): mean=2.12 cr=[1.98, 2.30]
small, positive interaction effect of alcohol and meds, a ~2x multiplier of
baseline sneeze count, as per the generated data:
theta_alcohol_nomeds = 36 (as set above)
theta_alcohol_nomeds = exp(b0_intercept + b1_alcohol + b2_nomeds + b3_alcohol:nomeds)
                             = exp(b0_intercept) * exp(b1_alcohol) * exp(b2_nomeds) * exp(b3_alcohol:nomeds)
                             = 1 * 3 * 6 * 2 = 36
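A quick numerical check of the walkthrough above (a sketch; it assumes the rows of `pm.df_summary` are indexed by the same RV names used when building `mdl_fish`):
```
means = pm.df_summary(trc_fish, varnames=rvs_fish)['mean']
b0 = means['b0_intercept']
b1 = means['b1_alcohol[T.True]']
b2 = means['b2_nomeds[T.True]']
b3 = means['b3_alcohol[T.True]:nomeds[T.True]']
# implied Poisson means for the four alcohol/nomeds combinations
print(np.exp(b0))                 # ~ theta_noalcohol_meds   = 1
print(np.exp(b0 + b1))            # ~ theta_alcohol_meds     = 3
print(np.exp(b0 + b2))            # ~ theta_noalcohol_nomeds = 6
print(np.exp(b0 + b1 + b2 + b3))  # ~ theta_alcohol_nomeds   = 36
```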
---
### 2. Alternative method, using `pymc.glm`
**Create Model**
**Alternative automatic formulation using `pmyc.glm`**
```
with pm.Model() as mdl_fish_alt:
pm.glm.GLM.from_formula(fml, df, family=pm.glm.families.Poisson())
```
**Sample Model**
```
with mdl_fish_alt:
trc_fish_alt = pm.sample(4000, tune=2000)[2000:]
```
**View Traces**
```
rvs_fish_alt = [rv.name for rv in strip_derived_rvs(mdl_fish_alt.unobserved_RVs)]
plot_traces_pymc(trc_fish_alt, varnames=rvs_fish_alt)
```
### Transform coeffs
```
np.exp(pm.df_summary(trc_fish_alt, varnames=rvs_fish_alt)[['mean','hpd_2.5','hpd_97.5']])
```
**Observe:**
+ The traceplots look well mixed
+ The transformed model coeffs look more or less the same as those generated by the manual model
+ Note also that the `mu` coeff is for the overall mean of the dataset and has an extreme skew; if we look at the median value ...
```
np.percentile(trc_fish_alt['mu'], [25,50,75])
```
... of 9.45 with a range [25%, 75%] of [4.17, 24.18], we see this is pretty close to the overall mean of:
```
df['nsneeze'].mean()
```
---
Example originally contributed by Jonathan Sedar 2016-05-15 [github.com/jonsedar](https://github.com/jonsedar)
# Metadata
```
Course: DS 5001
Module: 00 Final Projects
Topic: Using MALLET
```
# Notes
See the Appendix below for information on how to download and install <a href="https://mimno.github.io/Mallet/topics">MALLET</a>.
# Set Up
```
data_home = "../data"
local_lib = "../lib"
data_prefix = 'novels'
OHCO = ['book_id','chap_id','para_num','sent_num','token_num']
max_words = 10000
# For MALLET
num_topics = 20
num_iters = 1000
show_interval = 100
import pandas as pd
import numpy as np
```
# Import CORPUS
```
LIB = pd.read_csv(f"{data_home}/{data_prefix}/{data_prefix}-LIB.csv").set_index(OHCO[:1])
CORPUS = pd.read_csv(f"{data_home}/{data_prefix}/{data_prefix}-CORPUS.csv").set_index(OHCO)
CORPUS.head()
```
# Create DOC
```
def gather_docs(CORPUS, ohco_level, term_col='term_str'):
OHCO = CORPUS.index.names
CORPUS[term_col] = CORPUS[term_col].astype('str')
DOC = CORPUS.groupby(OHCO[:ohco_level])[term_col].apply(lambda x:' '.join(x)).to_frame('doc_str')
return DOC
DOC = gather_docs(CORPUS, 2)
DOC['n_tokens'] = DOC.doc_str.apply(lambda x: len(x.split()))
DOC
```
## Dump corpus to CSV file
```
mallet_corpus = DOC.join(LIB)[['doc_str','author_id']]
mallet_corpus.columns = 'doc_content doc_label'.split()
mallet_corpus[['doc_label','doc_content']].to_csv('novels-corpus.csv', index=False)
```
## MALLET Time
### Show MALLET options
```
mallet_home = "/Users/rca2t1/opt/mallet/bin"
! which mallet
```
### Import corpus
```
!{mallet_home}/mallet import-file --input novels-corpus.csv --output novels-corpus.mallet --keep-sequence TRUE
```
### Train topics
```
!{mallet_home}/mallet train-topics --input novels-corpus.mallet --num-topics {num_topics} --num-iterations {num_iters} \
--output-doc-topics novels-doc-topics.txt \
--output-topic-keys novels-topic-keys.txt \
--word-topic-counts-file novels-word-topic-counts-file.txt \
--topic-word-weights-file novels-topic-word-weights-file.txt \
--xml-topic-report novels-topic-report.xml \
--xml-topic-phrase-report novels-topic-phrase-report.xml \
--show-topics-interval {show_interval} \
--use-symmetric-alpha false \
--optimize-interval 100 \
--diagnostics-file novels-diagnostics.xml
```
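The `--output-doc-topics` file is usually the first output to inspect; below is a minimal sketch of pulling it into pandas. It assumes the tab-separated layout of recent MALLET releases (document index, document name, then one proportion column per topic); older releases interleave sorted topic/proportion pairs instead, so the column names here are an assumption.
```
import pandas as pd

# document index, document name, then num_topics proportion columns (recent MALLET releases)
cols = ['doc_id', 'doc_name'] + [f'topic_{k}' for k in range(num_topics)]
DOCTOPICS = pd.read_csv('novels-doc-topics.txt', sep='\t', comment='#',
                        header=None, names=cols).set_index('doc_id')
DOCTOPICS.head()
```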
# Appendix: README
<a href="https://mimno.github.io/Mallet/topics">Download MALLET here</a> | <a href="https://github.com/ontoligent/mazo">Mazo, a wrapper around MALLET to organize its output.</a>
[](https://travis-ci.com/MNCC/Mallet)
[](https://codecov.io/gh/MNCC/Mallet)
Mallet
======
Website: https://mimno.github.io/Mallet/
MALLET is a Java-based package for statistical natural language processing, document classification, clustering, topic modeling, information extraction, and other machine learning applications to text.
MALLET includes sophisticated tools for document classification: efficient routines for converting text to "features", a wide variety of algorithms (including Naïve Bayes, Maximum Entropy, and Decision Trees), and code for evaluating classifier performance using several commonly used metrics.
In addition to classification, MALLET includes tools for sequence tagging for applications such as named-entity extraction from text. Algorithms include Hidden Markov Models, Maximum Entropy Markov Models, and Conditional Random Fields. These methods are implemented in an extensible system for finite state transducers.
Topic models are useful for analyzing large collections of unlabeled text. The MALLET topic modeling toolkit contains efficient, sampling-based implementations of Latent Dirichlet Allocation, Pachinko Allocation, and Hierarchical LDA.
Many of the algorithms in MALLET depend on numerical optimization. MALLET includes an efficient implementation of Limited Memory BFGS, among many other optimization methods.
In addition to sophisticated Machine Learning applications, MALLET includes routines for transforming text documents into numerical representations that can then be processed efficiently. This process is implemented through a flexible system of "pipes", which handle distinct tasks such as tokenizing strings, removing stopwords, and converting sequences into count vectors.
An add-on package to MALLET, called GRMM, contains support for inference in general graphical models, and training of CRFs with arbitrary graphical structure.
## Installation
To build a Mallet 2.0 development release, you must have the Apache ant build tool installed. From the command prompt, first change to the mallet directory, and then type
`ant`
If `ant` finishes with `"BUILD SUCCESSFUL"`, Mallet is now ready to use.
If you would like to deploy Mallet as part of a larger application, it is helpful to create a single ".jar" file that contains all of the compiled code. Once you have compiled the individual Mallet class files, use the command:
`ant jar`
This process will create a file "mallet.jar" in the "dist" directory within Mallet.
## Usage
Once you have installed Mallet you can use it using the following command:
```
bin/mallet [command] --option value --option value ...
```
Type `bin/mallet` to get a list of commands, and use the option `--help` with any command to get a description of valid options.
For details about the commands please visit the API documentation and website at: https://mimno.github.io/Mallet/
## List of Algorithms
* Topic Modelling
* LDA
* Parallel LDA
* DMR LDA
* Hierarchical LDA
* Labeled LDA
* Polylingual Topic Model
* Hierarchical Pachinko Allocation Model (PAM)
* Weighted Topic Model
* LDA with integrated phrase discovery
* Word Embeddings (word2vec) using skip-gram with negative sampling
* Classification
* AdaBoost
* Bagging
* Winnow
* C45 Decision Tree
* Ensemble Trainer
* Maximum Entropy Classifier (Multinomial Logistic Regression)
* Naive Bayes
* Rank Maximum Entropy Classifier
* Posterior Regularization Auxiliary Model
* Clustering
* Greedy Agglomerative
* Hill Climbing
* K-Means
* K-Best
* Sequence Prediction Models
* Conditional Random Fields
* Maximum Entropy Markov Models
* Hidden Markov Models
* Semi-Supervised Sequence Prediction Models
* Linear Regression
### Import Libraries
```
import pandas as pd
import joblib
import numpy as np
import json
from xgboost import XGBClassifier
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler,OneHotEncoder
from sklearn.impute import SimpleImputer
#In case I need to update datarobot-drum
!pip install datarobot-drum --upgrade
```
### Import Data
```
train = pd.read_csv('../data/readmissions_train.csv')
X = train.drop('readmitted',axis=1)
X.drop(['diag_1_desc', 'diag_1', 'diag_2', 'diag_3'],axis=1,inplace=True)
y = train.pop('readmitted')
```
### Define Preprocessing step per type of column
```
#Preprocessing for numerical features
numeric_features = list(X.select_dtypes('int64').columns)
for c in numeric_features:
X[c] = X[c].fillna(0)
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler())])
#Preprocessing for categorical features
categorical_features = list(X.select_dtypes('object').columns)
for c in categorical_features:
X[c] = X[c].fillna('missing')
categorical_transformer = Pipeline(steps=[
('OneHotEncoder', OneHotEncoder(handle_unknown='ignore'))])
#Preprocessor with all of the steps
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
```
### Fit the Preprocessing Pipeline
```
# Full preprocessing pipeline
pipeline = Pipeline(steps=[('preprocessor', preprocessor)])
#Train the model-Pipeline
pipeline.fit(X,y)
#Preprocess x
preprocessed = pipeline.transform(X)
#I could also train the model with the sparse matrix. I transform it to pandas because the hook function in custom.py expects a pandas dataframe to be used for scoring.
preprocessed = pd.DataFrame.sparse.from_spmatrix(preprocessed)
```
### Train XGBoost Classifier
Normally, the XGBoost classifier could be part of the final scikit-learn pipeline. I am opting to keep them separate in order to create a more complicated example with different pkl files for preprocessing and scoring.
```
model = XGBClassifier(colsample_bylevel=0.2, max_depth= 10, learning_rate = 0.02, n_estimators=300)
model.fit(preprocessed, y)
```
### Save Custom Model files
```
joblib.dump(pipeline,'custom_model/preprocessing.pkl')
joblib.dump(model, 'custom_model/model.pkl')
!drum validation --code-dir ./custom_model --input ../data/readmissions_test.csv --target-type binary --positive-class-label True --negative-class-label False
```
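For reference, the two pickles saved above are what the `custom.py` in `./custom_model` loads at scoring time. A minimal sketch of such hooks is shown below; the hook names follow the standard DRUM interface, but everything else (the dropped columns, the sparse-to-pandas conversion, and the `True`/`False` probability column names) is an assumption mirroring this notebook rather than the actual file contents.
```
# custom.py (illustrative sketch only)
import joblib
import pandas as pd

def load_model(code_dir):
    # load both artifacts saved above
    preprocessing = joblib.load(f'{code_dir}/preprocessing.pkl')
    model = joblib.load(f'{code_dir}/model.pkl')
    return preprocessing, model

def score(data, model, **kwargs):
    preprocessing, clf = model
    # mirror the training-time column handling (assumed)
    data = data.drop(['diag_1_desc', 'diag_1', 'diag_2', 'diag_3'], axis=1, errors='ignore')
    X = pd.DataFrame.sparse.from_spmatrix(preprocessing.transform(data))
    probs = clf.predict_proba(X)
    # one probability column per class label, matching the labels passed to drum
    return pd.DataFrame({'True': probs[:, 1], 'False': probs[:, 0]})
```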
### Validate model can work as `Custom Training Model`
```
!drum fit --code-dir ./custom_model --input ../data/readmissions_train.csv --target-type binary --target readmitted --positive-class-label True --negative-class-label False
!drum score --code-dir ./custom_model --input ../data/readmissions_test.csv --target-type binary --positive-class-label True --negative-class-label False
```
## Cluster Based Analysis
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
from salishsea_tools import viz_tools
%matplotlib inline
```
### Load in matched data from Elise
```
df = pickle.load(open('/data/eolson/results/MEOPAR/clusterGroups/DFODataModelClusterBIO_1905temp.pkl', 'rb'))
df
df.keys()
# Set Datetime as Index
df['datetime'] = pd.to_datetime(df[['Year', 'Month', 'Day', 'Hour']])
df = df.set_index('datetime')
# Quick Look
plt.plot(df.l10_obsChl[df.Cluster==1], df.l10_modChl[df.Cluster==1])
# plot matched data locations
fig, ax = plt.subplots(figsize = (6,6))
viz_tools.set_aspect(ax, coords='map')
ax.plot(df.Lon[df.Cluster==1], df.Lat[df.Cluster==1], 'ko', label='Cluster 1')
ax.plot(df.Lon[df.Cluster==2], df.Lat[df.Cluster==2], 'bo', label='Cluster 2')
ax.plot(df.Lon[df.Cluster==3], df.Lat[df.Cluster==3], 'co', label='Cluster 3')
ax.plot(df.Lon[df.Cluster==4], df.Lat[df.Cluster==4], 'ro', label='Cluster 4')
ax.plot(df.Lon[df.Cluster==5], df.Lat[df.Cluster==5], 'o', color='darkblue', label='Cluster 5')
bathy = '/home/sallen/MEOPAR/grid/bathymetry_201702.nc'
viz_tools.plot_land_mask(ax, bathy, coords='map', color='burlywood')
ax.set_ylim(48, 50.5)
ax.legend()
ax.set_xlim(-125.9, -122.5);
# Look at various means
print (df.Chl[df.Cluster==3].mean())
print (df.Chl[df.Cluster==4].mean())
print (df.mod_Chl[df.Cluster==3].mean(), np.log10(0.001 + df.mod_Chl[df.Cluster==3].mean()))
print (df.mod_Chl[df.Cluster==4].mean())
# Mean plot, not very convincing
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
ax.plot([3, 4], [df.l10_obsChl[df.Cluster==3].mean(), df.l10_obsChl[df.Cluster==4].mean()], marker='o', markersize=10, fillstyle='none',
linewidth=0, label='Observations')
ax.plot([3, 4], [df.l10_modChl[df.Cluster==3].mean(), df.l10_modChl[df.Cluster==4].mean()], marker='x', markersize=10, fillstyle='none',
linewidth=0, label='Model');
# plot matched data locations
fig, ax = plt.subplots(figsize = (6, 6))
ax.plot(df.Chl[df.Cluster==1], df.mod_Chl[df.Cluster==1], 'ko', label='Cluster 1')
ax.plot(df.Chl[df.Cluster==2], df.mod_Chl[df.Cluster==2], 'bo', label='Cluster 2')
ax.plot(df.Chl[df.Cluster==3], df.mod_Chl[df.Cluster==3], 'co', label='Cluster 3')
ax.plot(df.Chl[df.Cluster==4], df.mod_Chl[df.Cluster==4], 'ro', label='Cluster 4')
ax.plot(df.Chl[df.Cluster==5], df.mod_Chl[df.Cluster==5], 'o', color='darkblue', label='Cluster 5')
ax.legend();
def logt(x):
return np.log10(0.001 + x)
logt(0.1)
# plot matched data locations
plt.rcParams['font.size'] = 17
fig, ax = plt.subplots(figsize = (6, 6))
alpha = 0.25
lim= 0.12
marksize = 20
#ax.plot(df.l10_obsChl[df.Cluster==1], df.l10_modChl[df.Cluster==1], 'ko', label='Cluster 1')
#ax.plot(df.l10_obsChl[df.Cluster==2], df.l10_modChl[df.Cluster==2], 'bo', label='Cluster 2')
ax.plot(df.l10_obsChl[df.Cluster==3], df.l10_modChl[df.Cluster==3], 'o', color='skyblue', alpha=alpha)
ax.plot(df.l10_obsChl[df.Cluster==4], df.l10_modChl[df.Cluster==4], 'ro', alpha=alpha)
ax.plot(df.l10_obsChl[df.Cluster==5], df.l10_modChl[df.Cluster==5], 'o', color='darkblue', alpha=alpha)
ax.set_ylim(logt(lim), 2);
ax.set_xlim(logt(lim), 2)
print (df.Chl[(df.Cluster==5) & (df.Chl>lim)].count())
estimate = (df.Chl[(df.Cluster==5) & (df.Chl>lim)].std()/np.sqrt(298))
#ax.plot(logt(df.Chl[(df.Cluster==5) & (df.Chl>lim)].mean()+estimate), 1.5, 'gx')
#ax.plot(logt(df.Chl[(df.Cluster==5) & (df.Chl>lim)].mean()-estimate), 1.5, 'gx')
ax.plot([logt(df.Chl[(df.Cluster==5) & (df.Chl>lim)].mean()),
],
[logt(df.mod_Chl[(df.Cluster==5) & (df.mod_Chl>lim)].mean()),
],
marker='s', markersize=marksize, markeredgecolor='k',
linewidth=0, color='darkblue', label='Juan de Fuca');
ax.plot([logt(df.Chl[(df.Cluster==3) & (df.Chl>lim)].mean()), ],
[logt(df.mod_Chl[(df.Cluster==3) & (df.mod_Chl>lim)].mean()), ],
marker='s', markersize=marksize, markeredgecolor='k',
linewidth=0, color='skyblue', label='Central');
ax.plot([logt(df.Chl[(df.Cluster==4) & (df.Chl>lim)].mean()),
],
[logt(df.mod_Chl[(df.Cluster==4) & (df.mod_Chl>lim)].mean()),
],
marker='s', markersize=marksize, markeredgecolor='k',
linewidth=0, color='r', label='North');
ax.set_xlabel('Log 10 Observed Chlorophyll (log 10 mg m$^{-3}$)')
ax.set_ylabel('Log 10 Modelled Chlorophyll\n(log 10 mg m$^{-3}$)')
ax.legend();
plt.savefig('cluster_chl.png', bbox_inches='tight')
plt.plot(df.mod_Chl)
df.mod_Chl.mean()
df.loc[df['mod_Chl'] == 0, 'mod_Chl'] = np.nan
np.nanmean(df.mod_Chl)
```
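The per-cluster means printed one at a time above can also be collected into a single summary table. The snippet below is a small convenience sketch using the same `Chl` and `mod_Chl` columns (it assumes the zero model values have already been replaced with NaN as in the cell above):

```
# Per-cluster summary of observed vs modelled chlorophyll (sketch)
summary = (df.groupby('Cluster')[['Chl', 'mod_Chl']]
             .mean()
             .rename(columns={'Chl': 'obs_mean', 'mod_Chl': 'mod_mean'}))
summary['l10_obs_of_mean'] = np.log10(0.001 + summary['obs_mean'])
summary['l10_mod_of_mean'] = np.log10(0.001 + summary['mod_mean'])
print(summary)
```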
# POS tagging
## Part-of-speech tagging
### Assigning a tag to every word, for example noun, verb, adjective, etc.
## Example:
Input: [The cat sat on the mat]
Output: [DT NN VB IN DT NN]
```
from keras.layers.core import Activation, Dense, Dropout, RepeatVector, SpatialDropout1D
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import GRU
from keras.layers.wrappers import TimeDistributed
from keras.models import Sequential
from keras.preprocessing import sequence
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
import collections
import nltk
import numpy as np
import os
DATA_DIR ="./data"
fedata = open(os.path.join(DATA_DIR, "treebank_sents.txt"), mode="w")
ffdata = open(os.path.join(DATA_DIR, "treebank_poss.txt"), mode="w")
sents = nltk.corpus.treebank.tagged_sents()
for sent in sents:
#print(sent)
words, poss = [], []
for word, pos in sent:
if pos=="-NONE-":
continue
words.append(word)
poss.append(pos)
fedata.write("{:s}\n".format(" ".join(words)))
ffdata.write("{:s}\n".format(" ".join(poss)))
fedata.close()
ffdata.close()
def parse_sentences(filename):
word_freqs = collections.Counter()
num_recs, maxlen = 0,0
fin = open(filename, mode="r")
for line in fin:
words = line.strip().lower().split()
for word in words:
word_freqs[word]+=1
if len(words)>maxlen:
maxlen=len(words)
num_recs+=1
fin.close()
return word_freqs, maxlen, num_recs
s_wordfreqs, s_maxlen, s_numrecs = parse_sentences(os.path.join(DATA_DIR, "treebank_sents.txt"))
t_wordfreqs, t_maxlen, t_numrecs = parse_sentences(os.path.join(DATA_DIR, "treebank_poss.txt"))
print("source word freqs length: %d; max length: %d; num recs: %d" % (len(s_wordfreqs), s_maxlen, s_numrecs))
print("target word freqs length: %d; max length: %d; num recs: %d" % (len(t_wordfreqs), t_maxlen, t_numrecs))
```
## The corpus has 10,497 distinct words, the longest sentence is 249 words, and there are 3,914 sentences in total
## The corpus uses 45 distinct POS tag types
```
MAX_SEQLEN = 250
S_MAX_FEATURES = 5000
T_MAX_FEATURES = 45
## Keep only the 5000 most frequent words of the source text,
## plus the two pseudo-tokens UNK and PAD
s_vocabsize = min(len(s_wordfreqs), S_MAX_FEATURES) + 2
s_word2index = {x[0]:(i+2) for i,x in enumerate(s_wordfreqs.most_common(S_MAX_FEATURES))}
s_word2index["PAD"]=0
s_word2index["UNK"]=1
s_index2word = {v:k for k,v in s_word2index.items()}
t_vocabsize = len(t_wordfreqs) + 1
t_word2index = {x[0]:(i+1) for i,x in enumerate(t_wordfreqs.most_common(T_MAX_FEATURES))}
t_word2index["PAD"]=0
t_index2word = {v:k for k,v in t_word2index.items()}
s_word2index
t_word2index
def build_tensor(filename, numrecs, word2index, maxlen, make_categorical=False, num_classes=0):
data = np.empty((numrecs,), dtype=list)
with open(filename, "r") as fin:
i=0
for line in fin:
wids=[]
for word in line.strip().lower().split():
if word in word2index:
wids.append(word2index[word])
else:
wids.append(word2index["UNK"])
if make_categorical:
data[i] = np_utils.to_categorical(wids, num_classes=num_classes)
else:
data[i]=wids
i+=1
pdata = sequence.pad_sequences(data, maxlen=maxlen)
return pdata
X = build_tensor(os.path.join(DATA_DIR, "treebank_sents.txt"), s_numrecs, s_word2index, MAX_SEQLEN)
y = build_tensor(os.path.join(DATA_DIR, "treebank_poss.txt"), t_numrecs, t_word2index, MAX_SEQLEN, True, t_vocabsize)
Xtrain,Xtest,Ytrain,Ytest = train_test_split(X,y, test_size=0.2, random_state=666)
Xtrain.shape
Xtest.shape
Xtrain[0]
Ytrain[0]
Ytrain[0][0]
```

## Network architecture
1. Input: each sentence as a list of word ids, shape (None, MAX_SEQLEN)
2. The Embedding layer maps each id to a dense vector, output shape (None, MAX_SEQLEN, EMBED_SIZE)
3. The RNN encoder is run with return_sequences=False, so after reading all MAX_SEQLEN steps it returns only the final context vector, shape (None, HIDDEN_SIZE)
4. A RepeatVector layer repeats that vector MAX_SEQLEN times, output shape (None, MAX_SEQLEN, HIDDEN_SIZE)
5. The RNN decoder (return_sequences=True) then produces an output at every step, shape (None, MAX_SEQLEN, HIDDEN_SIZE)
6. A TimeDistributed dense layer with softmax activation yields (None, MAX_SEQLEN, t_vocabsize); at each position, the index with the highest probability is the predicted POS tag
```
help(GRU)
help(TimeDistributed)
EMBED_SIZE = 128
HIDDEN_SIZE = 64
BATCH_SIZE = 32
NUM_EPOCHS = 1
model = Sequential()
model.add(Embedding(input_dim=s_vocabsize, output_dim=EMBED_SIZE, input_length=MAX_SEQLEN))
model.add(Dropout(0.2))
model.add(GRU(HIDDEN_SIZE, dropout=0.2, recurrent_dropout=0.2))
model.add(RepeatVector(MAX_SEQLEN))
model.add(GRU(HIDDEN_SIZE, return_sequences=True))
model.add(TimeDistributed(Dense(units=t_vocabsize)))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(Xtrain, Ytrain, batch_size = BATCH_SIZE, epochs = NUM_EPOCHS, validation_data=[Xtest, Ytest])
score,acc = model.evaluate(Xtest, Ytest, batch_size=BATCH_SIZE)
print("Test score: %.3f, accuracy:%.3f" % (score, acc))
```
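The notebook stops at the aggregate test score. To inspect individual predictions, the argmax indices can be mapped back through `t_index2word`; the cell below is a minimal decoding sketch (not part of the original notebook) that skips the PAD positions:

```
# Decode predicted POS tags for a few test sentences (illustrative sketch)
pred = model.predict(Xtest[:3])            # (3, MAX_SEQLEN, t_vocabsize)
pred_ids = np.argmax(pred, axis=-1)        # predicted tag index per position
true_ids = np.argmax(Ytest[:3], axis=-1)   # gold tag index per position
for words, p_row, t_row in zip(Xtest[:3], pred_ids, true_ids):
    for wid, pid, tid in zip(words, p_row, t_row):
        if wid == 0:                       # 0 is the PAD id, skip padding
            continue
        print(s_index2word.get(wid, "UNK"), "->",
              t_index2word.get(pid, "PAD"), "(gold:", t_index2word.get(tid, "PAD") + ")")
    print("---")
```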
# Space Time Scan
# Using SaTScan
- Start SaTScan and make a new session
- Under the "Input" tab:
- Set the "Case File" to "chicago.cas"
- Set the "Coordinates Files" to "chicago.geo"
- Set "Coordinates" to "Cartesian"
- Set "Time Precision" to Day
- Set the "Study Period" from 2011-03-01 to 2011-09-27 (or whatever)
- Under the "Analysis" tab:
- Select "Propsective Analysis" -> "Space-Time"
- Select "Probability Model" -> "Space-Time Permutation"
- Select "Time Aggregation" -> "1 Day"
- Under the "Output" tab:
- Select the "Main Results File" to whatever
*Optionally* change the spatial and temporal window:
- Under "Analysis", click "Advanced":
- Under "Spatial Window", select "is a circle with a ..."
- Under "Temporal Window", select "Maximum Temporal Cluster Size" is ... days
## Using our library code
```
%matplotlib inline
from common import *
#datadir = os.path.join("//media", "disk", "Data")
datadir = os.path.join("..", "..", "..", "..", "..", "Data")
south_side, points = load_data(datadir)
grid = grid_for_south_side()
import open_cp.stscan as stscan
import open_cp.stscan2 as stscan2
trainer = stscan.STSTrainer()
trainer.region = grid.region()
trainer.data = points
scanner, _ = trainer.to_scanner()
scanner.coords.shape, scanner.timestamps.shape
# Check how we convert the data
last_time = max(trainer.data.timestamps)
x = (last_time - trainer.data.timestamps) / np.timedelta64(1,"ms")
indexes = np.argsort(x)
np.testing.assert_allclose(x[indexes], scanner.timestamps)
np.testing.assert_allclose(trainer.data.coords[:,indexes], scanner.coords)
```
## Save to SaTScan format
Embarrassingly, we now seem to have surpassed SaTScan in terms of speed and memory usage, and so our code can analyse somewhat larger datasets (we do not compute p-values, of course, but we are _still_ faster...)
```
ts = scanner.timestamps / 1000 / 60
ts = ts[:100]
c = scanner.coords[:,:100]
stscan2.AbstractSTScan.write_to_satscan("temp", max(ts), c, ts)
max(max(ts) - ts)
scanner.timestamps = scanner.timestamps[:100]
scanner.coords = scanner.coords[:,:100]
list(scanner.find_all_clusters())
```
### Bin times
```
trainer1 = trainer.bin_timestamps(np.datetime64("2017-01-01"), np.timedelta64(1, "D"))
trainer1.data.number_data_points, trainer1.data.time_range
trainer1.to_satscan("test")
result = trainer1.predict()
result.statistics[:5]
```
### Grid first
```
trainer1 = trainer.grid_coords(grid.region(), grid.xsize)
trainer1 = trainer1.bin_timestamps(np.datetime64("2017-01-01"), np.timedelta64(1, "D"))
#trainer1.data = trainer1.data[trainer1.data.timestamps < np.datetime64("2011-04-01")]
trainer1.data.number_data_points, trainer1.data.time_range
trainer1.to_satscan("test")
result = trainer1.predict()
result.statistics[:5]
```
## Run the full analysis
```
result = trainer.predict()
result.clusters[0]
pred = result.grid_prediction(grid.xsize)
pred.mask_with(grid)
import matplotlib.patches
def plot_clusters(ax, coords, clusters):
    # use the coords passed in (not the truncated global scanner.coords) for the axis limits
    xmax, xmin = np.max(coords[0]), np.min(coords[0])
    xd = (xmax - xmin) / 100 * 5
    ax.set(xlim=[xmin-xd, xmax+xd])
    ymax, ymin = np.max(coords[1]), np.min(coords[1])
    yd = (ymax - ymin) / 100 * 5
    ax.set(ylim=[ymin-yd, ymax+yd])
ax.set_aspect(1)
for c in clusters:
cir = matplotlib.patches.Circle(c.centre, c.radius, alpha=0.5)
ax.add_patch(cir)
ax.scatter(*coords, color="black", marker="+", linewidth=1)
def plot_grid_pred(ax, pred):
cmap = ax.pcolormesh(*pred.mesh_data(), pred.intensity_matrix, cmap=yellow_to_red)
fig.colorbar(cmap, ax=ax)
ax.set_aspect(1)
ax.add_patch(descartes.PolygonPatch(south_side, fc="none", ec="Black"))
fig, axes = plt.subplots(ncols=2, figsize=(17,8))
plot_clusters(axes[0], trainer.data.coords, result.clusters)
plot_grid_pred(axes[1], pred)
```
## Grid and bin first
```
trainer1 = trainer.grid_coords(grid.region(), grid.xsize)
trainer1 = trainer1.bin_timestamps(np.datetime64("2017-01-01"), np.timedelta64(1, "D"))
trainer1.region = grid.region()
result1 = trainer1.predict()
result1.clusters[:10]
pred1 = result1.grid_prediction(grid.xsize)
pred1.mask_with(grid)
fig, axes = plt.subplots(ncols=2, figsize=(17,8))
plot_clusters(axes[0], trainer1.data.coords, result1.clusters)
plot_grid_pred(axes[1], pred1)
```
### Zero radius clusters
As you can see above, some of the clusters returned have zero radius (which makes sense: having assigned all events to the middle of the grid cell they fall into, there will be clusters consisting of just the events in one grid cell).
- We cannot see these in the plot above
- But they do contribute to the gridded "risk" profile, hence the mismatch between the left and right plots.
Instead, it is possible to use the library to replace each cluster by the cluster with the same centre but with a radius enlarged to the maximum extent possible so that it contains no more events.
- This is _not_ quite the same as still asking for the clusters not to overlap, as you can see.
- It leads to a different risk profile. It is not clear to me if this is "better" or not...
```
pred2 = result1.grid_prediction(grid.xsize, use_maximal_clusters=True)
pred2.mask_with(grid)
fig, axes = plt.subplots(ncols=2, figsize=(17,8))
plot_clusters(axes[0], trainer1.data.coords, result1.max_clusters)
plot_grid_pred(axes[1], pred2)
```
# Optimisation work
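The cells below profile and then vectorise the evaluation of the scan statistic. For a candidate space-time cylinder containing $a$ observed events, with $s$ events in the spatial disc, $t$ events in the time window, $N$ events in total and $e = st/N$ expected events, the quantity computed by `_ma_statistic` (and re-derived with log lookup tables further down) is the log-likelihood ratio

$$\lambda = a \log\frac{a}{e} + (N-a)\log\frac{N-a}{N-e},$$

evaluated only where $a > 1$ and $a > e$.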
```
time_masks, time_counts, times = scanner.make_time_ranges()
N = scanner.timestamps.shape[0]
centre = scanner.coords.T[0]
space_masks, space_counts, dists = scanner.find_discs(centre)
actual = scanner._calc_actual(space_masks, time_masks, time_counts)
expected = space_counts[:,None] * time_counts[None,:] / N
_mask = (actual > 1) & (actual > expected)
stats = scanner._ma_statistic(np.ma.array(actual, mask=~_mask),
np.ma.array(expected, mask=~_mask), N)
_mask1 = np.any(_mask, axis=1)
if not np.any(_mask1):
raise Exception()
m = np.ma.argmax(stats, axis=1)[_mask1]
stats1 = stats[_mask1,:]
stats2 = stats1[range(stats1.shape[0]),m].data
used_dists = dists[_mask1]
used_times = times[m]
%timeit( scanner.find_discs(centre) )
%timeit( np.sum(space_masks[:,:,None] & time_masks[:,None,:], axis=0) )
%timeit(scanner._calc_actual(space_masks, time_masks, time_counts))
np.testing.assert_allclose(scanner._calc_actual(space_masks, time_masks, time_counts),
np.sum(space_masks[:,:,None] & time_masks[:,None,:], axis=0))
%timeit(space_counts[:,None] * time_counts[None,:] / N)
%timeit((actual > 1) & (actual > expected))
%timeit(scanner._ma_statistic(np.ma.array(actual, mask=~_mask), np.ma.array(expected, mask=~_mask), N))
log_lookup = np.log(np.array([1] + list(range(1,N+1))))
log_lookup2 = np.log(np.array([1] + list(range(1,N*N+1))))
sh = (space_counts.shape[0], time_counts.shape[0])
s = np.ma.array(np.broadcast_to(space_counts[:,None], sh), mask=~_mask)
t = np.ma.array(np.broadcast_to(time_counts[None,:], sh), mask=~_mask)
a = np.ma.array(actual, mask=~_mask)
e = np.ma.array(s*t, mask=~_mask) / N
x1 = a * np.ma.log(a/e)
Nl = np.log(N)
aa = a.astype(np.int)
y1 = a * (Nl + log_lookup[aa] - log_lookup[s] - log_lookup[t])
assert np.ma.max(np.ma.abs(x1-y1)) < 1e-10
x2 = (N-a) * (np.ma.log(N-a) - np.ma.log(N-e))
y2 = (N-a) * (Nl + log_lookup[N-aa] - np.ma.log(N*N-s*t))
assert np.ma.max(np.ma.abs(x2-y2)) < 1e-10
aa = actual.astype(np.int)
def f():
sl = log_lookup[space_counts]
tl = log_lookup[time_counts]
st = N*N - space_counts[:,None] * time_counts[None,:]
Nl = np.log(N)
y = aa * (Nl + log_lookup[aa] - sl[:,None] - tl[None,:])
yy = (N-aa) * (Nl + log_lookup[N-aa] - log_lookup2[st])
return np.ma.array(y + yy, mask=~_mask)
stats = scanner._ma_statistic(np.ma.array(actual, mask=~_mask),
np.ma.array(expected, mask=~_mask), N)
np.ma.max(np.ma.abs(stats - f()))
%timeit(f())
%timeit(np.any(_mask, axis=1))
%timeit(np.ma.argmax(stats, axis=1)[_mask1])
%timeit(stats[_mask1,:])
%timeit(stats1[range(stats1.shape[0]),m].data)
%timeit(dists[_mask1])
%timeit(times[m])
def f():
x = scanner.faster_score_all()
return next(x)
def f1():
x = scanner.faster_score_all_new()
return next(x)
a = f()
a1 = f1()
for i in range(4):
np.testing.assert_allclose(a[i], a1[i])
# Compare against the old slow method
def find_chunk(ar, start_index):
x = (ar == ar[start_index])
end_index = start_index
while end_index < len(ar) and x[end_index]:
end_index += 1
return end_index
x = scanner.faster_score_all_old()
a2 = next(x)
start_index = 0
for index in range(len(a1[1])):
end_index = find_chunk(a2[1], start_index)
i = np.argmax(a2[3][start_index:end_index])
for j in range(1,4):
assert abs(a2[j][start_index+i] - a1[j][index]) < 1e-10
start_index = end_index
%timeit(f())
%timeit(f1())
import datetime
x = scanner.faster_score_all()
for _ in range(20):
now = datetime.datetime.now()
next(x)
print(datetime.datetime.now() - now)
next(scanner.find_all_clusters())
```
# 1. SETTINGS
```
import numpy as np
import pandas as pd
pd.set_option("display.max_columns", None)
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
import os
import time
import multiprocessing
from sklearn.metrics import log_loss
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
import tsfresh
from tsfresh import extract_features
from tsfresh.utilities.dataframe_functions import impute
import warnings
warnings.filterwarnings('ignore')
import gc
gc.enable()
### FUNCTION 4
def remove_bands(df):
##### INDIVIDUAL VARIABLES
# extract some bands
t2 = df.loc[:, df.columns.str.endswith('_p2')].divide(3)
t3 = df.loc[:, df.columns.str.endswith('_p3')].divide(3)
t4 = df.loc[:, df.columns.str.endswith('_p4')].divide(3)
# rename columns
t2.columns = [col.replace("_p2", "_p234") for col in t2.columns]
t3.columns = [col.replace("_p3", "_p234") for col in t3.columns]
t4.columns = [col.replace("_p4", "_p234") for col in t4.columns]
# average
t234 = t2.add(t3)
t234 = t234.add(t4)
# remove individual bands
df = df.loc[:, ~df.columns.str.endswith('_p2')]
df = df.loc[:, ~df.columns.str.endswith('_p3')]
df = df.loc[:, ~df.columns.str.endswith('_p4')]
# merge averaged band
df = pd.concat([df, t234], axis = 1)
##### PASSBAND RATIOS
# extract some bands
t2 = df.filter(like = 'p2_p0').divide(3)
t3 = df.filter(like = 'p3_p0').divide(3)
t4 = df.filter(like = 'p4_p0').divide(3)
# rename columns
t2.columns = [col.replace("p2_p0", "p234_p0") for col in t2.columns]
t3.columns = [col.replace("p3_p0", "p234_p0") for col in t3.columns]
t4.columns = [col.replace("p4_p0", "p234_p0") for col in t4.columns]
# average
t234 = t2.add(t3)
t234 = t234.add(t4)
# remove individual bands
#drops = list(df.filter(like = 'p2_p0').columns) + list(df.filter(like = 'p3_p0').columns) + list(df.filter(like = 'p4_p0').columns)
#keeps = [f for f in df.columns if f not in drops]
#df = df[keeps]
# merge averaged band
df = pd.concat([df, t234], axis = 1)
return df
### FUNCTION 5
def add_dist_ratios(df):
    # distance modulus minus median flux for each passband (differences, despite the function name)
df['dist_by_med_flux_p0'] = df['distmod'] - df['flux_median_p0']
df['dist_by_med_flux_p1'] = df['distmod'] - df['flux_median_p1']
df['dist_by_med_flux_p2'] = df['distmod'] - df['flux_median_p2']
df['dist_by_med_flux_p3'] = df['distmod'] - df['flux_median_p3']
df['dist_by_med_flux_p4'] = df['distmod'] - df['flux_median_p4']
df['dist_by_med_flux_p5'] = df['distmod'] - df['flux_median_p5']
return df
```
# 2. DATA PREPARATION
## TRAIN
## TEST
## MERGER AND SCALING
```
### IMPORT READY DATA
data = pd.read_csv('../input/data_v10_merged.csv')
data.shape
# drop some features
oof_df = data[['object_id']]
del data['object_id'], data['hostgal_specz']
# impute inf & null
data.replace(to_replace = [-np.inf, np.inf], value = np.nan, inplace = True)
data_median = data.median(axis = 0, skipna = True)
data.fillna(data_median, inplace = True)
data = data.astype('float32')
# rescale
from sklearn.preprocessing import StandardScaler, MinMaxScaler
ss = MinMaxScaler()
data = ss.fit_transform(data)
data = pd.DataFrame(data)
data.shape
```
# 3. AUTOENCODER
```
# libraries
import keras
from keras.datasets import mnist
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Flatten, Reshape
from keras import regularizers
from keras import backend as K
# parameters
encoding_dim = 30
num_epochs = 50
num_batch = 250
# clear session
K.clear_session()
### AUTOENCODER
# dimensions
input_dim = data.shape[1]
# architecture type
autoencoder = Sequential()
# encoder layers
autoencoder.add(Dense(4 * encoding_dim, input_shape = (input_dim,), activation = 'relu'))
autoencoder.add(Dense(2 * encoding_dim, activation = 'relu'))
autoencoder.add(Dense(encoding_dim, activation = 'relu'))
# decoder layers
autoencoder.add(Dense(2 * encoding_dim, activation = 'relu'))
autoencoder.add(Dense(4 * encoding_dim, activation = 'relu'))
autoencoder.add(Dense(input_dim, activation = 'sigmoid'))
### ENCODER PART
# dimensions
input_img = Input(shape = (input_dim, ))
# encoder layers
encoder_layer1 = autoencoder.layers[0]
encoder_layer2 = autoencoder.layers[1]
encoder_layer3 = autoencoder.layers[2]
encoder = Model(input_img, encoder_layer3(encoder_layer2(encoder_layer1(input_img))))
### MODELING
# compile
autoencoder.compile(optimizer = 'adam', loss = 'binary_crossentropy')
# fit
autoencoder.fit(data, data,
epochs = num_epochs,
batch_size = num_batch)
# predict
oof_preds = encoder.predict(data)
preds = pd.DataFrame(oof_preds)
preds.columns = ['auto' + str(l) for l in list(preds.columns)]
preds.insert(loc = 0, column = 'object_id', value = oof_df.object_id.reset_index(drop = True))
preds.describe()
preds.to_csv('../input/auto_f30_b250_e50.csv', index = False)
preds.shape
```
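The encoded features are saved keyed by `object_id`, so downstream (for example in the CV section that follows) they can be joined back onto the main feature table. A minimal sketch, assuming the merged feature file loaded above is reused:

```
# Join the 30 autoencoder features back onto the main feature table by object_id (sketch)
features = pd.read_csv('../input/data_v10_merged.csv')
auto_feats = pd.read_csv('../input/auto_f30_b250_e50.csv')
features = features.merge(auto_feats, on = 'object_id', how = 'left')
features.shape
```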
# 4. CV
# Image Classification with Transfer Learning
1. [Introduction](#Introduction)
2. [Prerequisites and Preprocessing](#Prerequisites-and-Preprocessing)
3. [Fine-tuning the Image classification model](#Fine-tuning-the-Image-classification-model)
4. [Training parameters](#Training-parameters)
5. [Start the training](#Start-the-training)
6. [Inference](#Inference)
## Introduction
Image classification is a fundamental computer vision task that involves predicting the overall label/class of an image. Modern computer vision techniques use neural net models for these kinds of tasks. Although neural nets can achieve high accuracy for image classification, they can be quite difficult to use directly. Amazon SageMaker's built-in image classification algorithm makes such neural nets much easier to use; simply provide your dataset and specify a few parameters, and you can train and deploy a custom model.
This notebook is an end-to-end example of image classification in transfer learning mode to "fine-tune" a pretrained model. Fine-tuning typically results in substantial time and cost savings compared to training from scratch. We'll use SageMaker's built-in image classification algorithm in transfer learning mode to fine-tune a pretrained model previously trained on the well-known public ImageNet dataset. This fine-tuned model will be used to classify a new dataset different from ImageNet. In particular, the pretrained model will be fine-tuned with the [Caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/).
SageMaker's built-in image classification algorithm has an option for training from scratch as well as transfer learning. Using the built-in algorithm's transfer learning mode frees you from having to modify the underlying neural net architecture, which otherwise would be necessary if you used the neural net directly rather than SageMaker's built-in algorithm. There are many other conveniences provided by this built-in algorithm, such as the ability to automatically train faster on a cluster of many instances without requiring you to manage cluster setup and teardown.
To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on.
## Prerequisites and Preprocessing
### Permissions and environment variables
Here we set up the linkage and authentication for AWS services. There are three parts to this:
* The IAM role used to give learning and hosting access to your data. This will be obtained from the role used to start the notebook.
* The S3 bucket that you want to use for training and model data.
* The Amazon SageMaker image classification algorithm Docker image, which you can use out of the box without modifications.
```
%%time
import sagemaker
from sagemaker import get_execution_role
role = get_execution_role()
print(role)
sess = sagemaker.Session()
bucket = sess.default_bucket()
prefix = 'ic-transfer-learning'
from sagemaker.amazon.amazon_estimator import get_image_uri
training_image = get_image_uri(sess.boto_region_name, 'image-classification', repo_version="latest")
print (training_image)
```
## Fine-tuning the Image classification model
The Caltech 256 dataset consists of images from 256 categories plus a clutter category. It has a total of roughly 30,000 images, with a minimum of 80 images and a maximum of about 800 images per category.
The image classification algorithm can take two input formats. The first is a [recordio format](https://mxnet.incubator.apache.org/faq/recordio.html), and the other is a [lst format](https://mxnet.incubator.apache.org/faq/recordio.html?highlight=im2rec). In this example, we will use the recordio format.
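For reference only (this notebook uses the prebuilt recordio files), a .lst file is the tab-separated listing produced by MXNet's im2rec tool, with one line per image giving an integer index, the class label, and the relative image path. The indices, labels and paths below are made up to show the layout:

```
15	7	008.bathtub/008_0007.jpg
16	7	008.bathtub/008_0012.jpg
17	11	012.binoculars/012_0001.jpg
```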
```
import os
import urllib.request
import boto3
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
def upload_to_s3(channel, file):
s3 = boto3.resource('s3')
data = open(file, "rb")
key = channel + '/' + file
s3.Bucket(bucket).put_object(Key=key, Body=data)
# # caltech-256
download('http://data.mxnet.io/data/caltech-256/caltech-256-60-train.rec')
download('http://data.mxnet.io/data/caltech-256/caltech-256-60-val.rec')
upload_to_s3('validation', 'caltech-256-60-val.rec')
upload_to_s3('train', 'caltech-256-60-train.rec')
```
Next, we'll upload the data to Amazon S3 so it can be accessed by SageMaker for model training.
```
# Two channels: train and validation
s3train = 's3://{}/{}/train/'.format(bucket, prefix)
s3validation = 's3://{}/{}/validation/'.format(bucket, prefix)
# upload the recordio files to the train and validation channels
!aws s3 cp caltech-256-60-train.rec $s3train --quiet
!aws s3 cp caltech-256-60-val.rec $s3validation --quiet
```
Once we have the data available in S3 in the correct format for training, the next step is to actually train the model using the data. Before training the model, we need to setup the training parameters. The next section will explain the parameters in detail and dive into how to set up the training job.
## Training
Now that we are done with the data setup, we are almost ready to train our image classification model. To begin, let us create a ``sagemaker.estimator.Estimator`` object. This Estimator will launch the training job.
### Training parameters
There are two kinds of parameters that need to be set for training. The first kind is the parameters for the training job itself, such as amount and type of hardware to use, and data locations. For this example, these include:
* **Training instance count**: This is the number of instances on which to run the training. When the number of instances is greater than one, then the image classification algorithm will run in a distributed cluster automatically without requiring you to manage cluster setup.
* **Training instance type**: This indicates the type of machine on which to run the training. Typically, we use GPU instances for computer vision models such as this one.
* **Output path**: This is the S3 folder in which the training output will be stored.
```
s3_output_location = 's3://{}/{}/output'.format(bucket, prefix)
ic = sagemaker.estimator.Estimator(training_image,
role,
train_instance_count=1,
train_instance_type='ml.p3.8xlarge',
train_volume_size = 50,
train_max_run = 360000,
input_mode= 'File',
output_path=s3_output_location,
sagemaker_session=sess)
```
Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These include:
* **num_layers**: The number of layers (depth) for the network. We use 18 in this example, but other values such as 50, 152 can be used to achieve greater accuracy at the cost of longer training time.
* **use_pretrained_model**: Set to 1 to use a pretrained model for transfer learning.
* **image_shape**: The input image dimensions, 'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be the same as in the actual image.
* **num_classes**: This is the number of output classes for the new dataset. Imagenet has 1000 classes, but the number of output classes for our pretrained network can be changed with fine-tuning. For this Caltech dataset, we use 257 because it has 256 object categories + 1 clutter class.
* **num_training_samples**: This is the total number of training samples. It is set to 15420 for the Caltech dataset due to the current split between training and validation data.
* **mini_batch_size**: The number of training samples used for each mini batch. In distributed training for multiple training instances (we just use one here), the number of training samples used per batch would be N * mini_batch_size, where N is the number of hosts on which training is run.
* **epochs**: Number of training epochs, i.e. passes over the complete training data.
* **learning_rate**: Learning rate for training.
* **precision_dtype**: Training datatype precision (default: float32). If set to 'float16', the training will be done in mixed_precision mode and will be faster than float32 mode, at the cost of slightly less accuracy.
```
ic.set_hyperparameters(num_layers=18,
use_pretrained_model=1,
image_shape = "3,224,224",
num_classes=257,
num_training_samples=15420,
mini_batch_size=128,
epochs=2,
learning_rate=0.01,
precision_dtype='float32')
```
## Input data specification
The next step is to set the data type and channels used for training. The channel definitions inform SageMaker about where to find both the training and validation datasets in S3.
```
train_data = sagemaker.session.s3_input(s3train, distribution='FullyReplicated',
content_type='application/x-recordio', s3_data_type='S3Prefix')
validation_data = sagemaker.session.s3_input(s3validation, distribution='FullyReplicated',
content_type='application/x-recordio', s3_data_type='S3Prefix')
data_channels = {'train': train_data, 'validation': validation_data}
```
## Start the training
Now we can start the training job by calling the `fit` method of the Estimator object.
```
ic.fit(inputs=data_channels, logs=True)
```
# Inference
***
A trained model does nothing on its own. We now want to use the model to perform inference, i.e. get predictions from the model. For this example, that means predicting the Caltech-256 class of a given image. To deploy the trained model, we simply use the `deploy` method of the Estimator. This will create a SageMaker endpoint that can return predictions in real time, for example for use with a consumer-facing app that must have low latency responses to user requests. SageMaker also can perform offline batch, asynchronous inference with its Batch Transform feature.
```
ic_classifier = ic.deploy(initial_instance_count = 1,
instance_type = 'ml.m5.xlarge')
```
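As mentioned above, SageMaker can also score offline with Batch Transform instead of a persistent endpoint. A minimal sketch is shown below; the `batch-input/` prefix is a made-up staging location for images to score, not something created earlier in this notebook.

```
# Offline scoring with Batch Transform instead of a real-time endpoint (sketch)
transformer = ic.transformer(instance_count=1, instance_type='ml.m5.xlarge')
transformer.transform('s3://{}/{}/batch-input/'.format(bucket, prefix),
                      content_type='application/x-image')
transformer.wait()
print(transformer.output_path)
```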
### Download a test image
```
!wget -O test.jpg https://raw.githubusercontent.com/awslabs/amazon-sagemaker-workshop/master/images/clawfoot_bathtub.jpg
file_name = 'test.jpg'
# test image
from IPython.display import Image
Image(file_name)
```
### Evaluation
Let's now use the SageMaker endpoint hosting the trained model to predict the Caltech-256 class of the test image. The model outputs class probabilities. Typically, one selects the class with the maximum probability as the final predicted class output.
**Note:** Although the network is likely to predict the correct class (bathtub), the prediction is not guaranteed to be accurate, as model training is a stochastic process. To limit the training time and related cost, we have trained the model only for a couple of epochs. If the model is trained for more epochs (say 20), the output class will be more accurate.
```
import json
import numpy as np
with open(file_name, 'rb') as f:
payload = f.read()
payload = bytearray(payload)
ic_classifier.content_type = 'application/x-image'
result = json.loads(ic_classifier.predict(payload))
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
object_categories = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan', 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']
print("Result: label - " + object_categories[index] + ", probability - " + str(result[index]))
```
### Clean up
When we're done with the endpoint, we can just delete it and the backing instance will be released. Run the following cell to delete the endpoint.
```
ic_classifier.delete_endpoint()
```
# Module 2 Assessment
Welcome to the assessment for Module 2: Mapping for Planning. In this assessment, you will be generating an occupancy grid using lidar scanner measurements from a moving vehicle in an unknown environment. You will use the inverse scanner measurement model developed in the lessons to map these measurements into occupancy probabilities, and then perform iterative logodds updates to an occupancy grid belief map. After the car has gathered enough data, your occupancy grid should converge to the true map.
In this assessment, you will:
* Gather range measurements of a moving car's surroundings using a lidar scanning function.
* Extract occupancy information from the range measurements using an inverse scanner model.
* Perform logodds updates on an occupancy grid based on incoming measurements.
* Iteratively construct a probabilistic occupancy grid from those log odds updates.
For most exercises, you are provided with a suggested outline. You are encouraged to diverge from the outline if you think there is a better, more efficient way to solve a problem.
Launch the Jupyter Notebook to begin!
```
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.animation as anim
from IPython.display import HTML
```
In this notebook, you will generate an occupancy grid based off of multiple simulated lidar scans. The inverse scanner model will be given to you, in the `inverse_scanner()` function. It returns a matrix of measured occupancy probability values based on the lidar scan model discussed in the video lectures. The `get_ranges()` function returns the scanned range values for a given vehicle position and scanner bearing. These two functions are given below. Make sure you understand what they are doing, as you will need to use them later in the notebook.
```
# Calculates the inverse measurement model for a laser scanner.
# It identifies three regions. The first where no information is available occurs
# outside of the scanning arc. The second where objects are likely to exist, at the
# end of the range measurement within the arc. The third are where objects are unlikely
# to exist, within the arc but with less distance than the range measurement.
def inverse_scanner(num_rows, num_cols, x, y, theta, meas_phi, meas_r, rmax, alpha, beta):
    m = np.zeros((num_rows, num_cols))
for i in range(num_rows):
for j in range(num_cols):
# Find range and bearing relative to the input state (x, y, theta).
r = math.sqrt((i - x)**2 + (j - y)**2)
phi = (math.atan2(j - y, i - x) - theta + math.pi) % (2 * math.pi) - math.pi
# Find the range measurement associated with the relative bearing.
k = np.argmin(np.abs(np.subtract(phi, meas_phi)))
# If the range is greater than the maximum sensor range, or behind our range
# measurement, or is outside of the field of view of the sensor, then no
# new information is available.
if (r > min(rmax, meas_r[k] + alpha / 2.0)) or (abs(phi - meas_phi[k]) > beta / 2.0):
m[i, j] = 0.5
            # If the range measurement lies within this cell, it is likely to be an object.
elif (meas_r[k] < rmax) and (abs(r - meas_r[k]) < alpha / 2.0):
m[i, j] = 0.7
# If the cell is in front of the range measurement, it is likely to be empty.
elif r < meas_r[k]:
m[i, j] = 0.3
return m
# Generates range measurements for a laser scanner based on a map, vehicle position,
# and sensor parameters.
# Uses the ray tracing algorithm.
def get_ranges(true_map, X, meas_phi, rmax):
(M, N) = np.shape(true_map)
x = X[0]
y = X[1]
theta = X[2]
meas_r = rmax * np.ones(meas_phi.shape)
# Iterate for each measurement bearing.
for i in range(len(meas_phi)):
# Iterate over each unit step up to and including rmax.
for r in range(1, rmax+1):
# Determine the coordinates of the cell.
xi = int(round(x + r * math.cos(theta + meas_phi[i])))
yi = int(round(y + r * math.sin(theta + meas_phi[i])))
# If not in the map, set measurement there and stop going further.
if (xi <= 0 or xi >= M-1 or yi <= 0 or yi >= N-1):
meas_r[i] = r
break
# If in the map, but hitting an obstacle, set the measurement range
# and stop ray tracing.
elif true_map[int(round(xi)), int(round(yi))] == 1:
meas_r[i] = r
break
return meas_r
```
In the following code block, we initialize the required variables for our simulation. This includes the initial state as well as the set of control actions for the car. We also set the rate of rotation of our lidar scan. The obstacles of the true map are represented by 1's in the true map, 0's represent free space. Each cell in the belief map `m` is initialized to 0.5 as our prior probability of occupancy, and from that belief map we compute our logodds occupancy grid `L`.
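For reference, the log odds ratio computed in the `L0` line below is

$$ L_0 = \log\left(\frac{m}{1-m}\right), $$

which evaluates to zero everywhere for the uniform prior of 0.5.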
```
# Simulation time initialization.
T_MAX = 150
time_steps = np.arange(T_MAX)
# Initializing the robot's location.
x_0 = [30, 30, 0]
# The sequence of robot motions.
u = np.array([[3, 0, -3, 0], [0, 3, 0, -3]])
u_i = 1
# Robot sensor rotation command
w = np.multiply(0.3, np.ones(len(time_steps)))
# True map (note: columns of the map correspond to the y axis and rows to the x axis,
# so robot position x = x(1) and y = x(2) are reversed when plotted to match the map orientation).
M = 50
N = 60
true_map = np.zeros((M, N))
true_map[0:10, 0:10] = 1
true_map[30:35, 40:45] = 1
true_map[3:6, 40:60] = 1
true_map[20:30, 25:29] = 1
true_map[40:50, 5:25] = 1
# Initialize the belief map.
# We are assuming a uniform prior.
m = np.multiply(0.5, np.ones((M, N)))
# Initialize the log odds ratio.
L0 = np.log(np.divide(m, np.subtract(1, m)))
L = L0
# Parameters for the sensor model.
meas_phi = np.arange(-0.4, 0.4, 0.05)
rmax = 30 # Max beam range.
alpha = 1 # Width of an obstacle (distance about measurement to fill in).
beta = 0.05 # Angular width of a beam.
# Initialize the vector of states for our simulation.
x = np.zeros((3, len(time_steps)))
x[:, 0] = x_0
```
Here is where you will enter your code. Your task is to complete the main simulation loop. After each step of robot motion, you are required to gather range data from your lidar scan, and then apply the inverse scanner model to map these to a measured occupancy belief map. From this, you will then perform a logodds update on your logodds occupancy grid, and update our belief map accordingly. As the car traverses through the environment, the occupancy grid belief map should move closer and closer to the true map. At the code block after the end of the loop, the code will output some values which will be used for grading your assignment. Make sure to copy down these values and save them in a .txt file for when your visualization looks correct. Good luck!
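As a reminder (the notation here is ours, not part of the grading), each iteration below adds the log odds of the inverse-scanner probabilities $m_{meas}$ to the grid and recovers the belief map with the logistic function; since the prior is uniform, $L_0 = 0$ and the usual $-L_0$ term drops out:

$$ L \leftarrow L + \log\left(\frac{m_{meas}}{1-m_{meas}}\right), \qquad m = \frac{e^{L}}{1+e^{L}} $$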
```
%%capture
# Initialize figures.
map_fig = plt.figure()
map_ax = map_fig.add_subplot(111)
map_ax.set_xlim(0, N)
map_ax.set_ylim(0, M)
invmod_fig = plt.figure()
invmod_ax = invmod_fig.add_subplot(111)
invmod_ax.set_xlim(0, N)
invmod_ax.set_ylim(0, M)
belief_fig = plt.figure()
belief_ax = belief_fig.add_subplot(111)
belief_ax.set_xlim(0, N)
belief_ax.set_ylim(0, M)
meas_rs = []
meas_r = get_ranges(true_map, x[:, 0], meas_phi, rmax)
meas_rs.append(meas_r)
invmods = []
invmod = inverse_scanner(M, N, x[0, 0], x[1, 0], x[2, 0], meas_phi, meas_r, \
rmax, alpha, beta)
invmods.append(invmod)
ms = []
ms.append(m)
# Main simulation loop.
for t in range(1, len(time_steps)):
# Perform robot motion.
move = np.add(x[0:2, t-1], u[:, u_i])
# If we hit the map boundaries, or a collision would occur, remain still.
if (move[0] >= M - 1) or (move[1] >= N - 1) or (move[0] <= 0) or (move[1] <= 0) \
or true_map[int(round(move[0])), int(round(move[1]))] == 1:
x[:, t] = x[:, t-1]
u_i = (u_i + 1) % 4
else:
x[0:2, t] = move
x[2, t] = (x[2, t-1] + w[t]) % (2 * math.pi)
# TODO Gather the measurement range data, which we will convert to occupancy probabilities
# using our inverse measurement model.
# meas_r = ...
meas_r = get_ranges(true_map, x[:,t], meas_phi, rmax)
meas_rs.append(meas_r)
# TODO Given our range measurements and our robot location, apply our inverse scanner model
# to get our measure probabilities of occupancy.
# invmod = ...
invmod = inverse_scanner(M,N,x[0,t],x[1,t],x[2,t], meas_phi, meas_r, rmax, alpha, beta)
invmods.append(invmod)
# TODO Calculate and update the log odds of our occupancy grid, given our measured
# occupancy probabilities from the inverse model.
# L = ...
L = np.log(np.divide(invmod, np.subtract(1,invmod))) + L
# TODO Calculate a grid of probabilities from the log odds.
# m = ...
m = (np.exp(L)) /(1 + np.exp(L))
ms.append(m)
# Ouput for grading. Do not modify this code!
m_f = ms[-1]
print("{}".format(m_f[40, 10]))
print("{}".format(m_f[30, 40]))
print("{}".format(m_f[35, 40]))
print("{}".format(m_f[0, 50]))
print("{}".format(m_f[10, 5]))
print("{}".format(m_f[20, 15]))
print("{}".format(m_f[25, 50]))
```
Now that you have written your main simulation loop, you can visualize your robot motion in the true map, your measured belief map, and your occupancy grid belief map below. These are shown in the 1st, 2nd, and 3rd videos, respectively. If your 3rd video converges towards the true map shown in the 1st video, congratulations! You have completed the assignment. Please submit the output of the box above as a .txt file to the grader for this assignment.
```
def map_update(i):
map_ax.clear()
map_ax.set_xlim(0, N)
map_ax.set_ylim(0, M)
map_ax.imshow(np.subtract(1, true_map), cmap='gray', origin='lower', vmin=0.0, vmax=1.0)
x_plot = x[1, :i+1]
y_plot = x[0, :i+1]
map_ax.plot(x_plot, y_plot, "bx-")
def invmod_update(i):
invmod_ax.clear()
invmod_ax.set_xlim(0, N)
invmod_ax.set_ylim(0, M)
invmod_ax.imshow(invmods[i], cmap='gray', origin='lower', vmin=0.0, vmax=1.0)
for j in range(len(meas_rs[i])):
invmod_ax.plot(x[1, i] + meas_rs[i][j] * math.sin(meas_phi[j] + x[2, i]), \
x[0, i] + meas_rs[i][j] * math.cos(meas_phi[j] + x[2, i]), "ko")
invmod_ax.plot(x[1, i], x[0, i], 'bx')
def belief_update(i):
belief_ax.clear()
belief_ax.set_xlim(0, N)
belief_ax.set_ylim(0, M)
belief_ax.imshow(ms[i], cmap='gray', origin='lower', vmin=0.0, vmax=1.0)
belief_ax.plot(x[1, max(0, i-10):i], x[0, max(0, i-10):i], 'bx-')
map_anim = anim.FuncAnimation(map_fig, map_update, frames=len(x[0, :]), repeat=False)
invmod_anim = anim.FuncAnimation(invmod_fig, invmod_update, frames=len(x[0, :]), repeat=False)
belief_anim = anim.FuncAnimation(belief_fig, belief_update, frames=len(x[0, :]), repeat=False)
HTML(map_anim.to_html5_video())
HTML(invmod_anim.to_html5_video())
HTML(belief_anim.to_html5_video())
```
#DATASCI W261: Machine Learning at Scale
##Version 1: One MapReduce Stage (join data at the first reducer)
# Create Matrices
Records for matrix A start with 0.
Records for matrix B start with 1.
$$ \textbf{A} = \left( \begin{array}{ccc}
5 & 0 \\
3 & 8 \\
0 & 6 \end{array} \right) $$
$$ \textbf{B} = \left( \begin{array}{ccc}
6 & 3 \\
2 & 0 \end{array} \right) $$
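For reference, the expected product (worked out by hand) is:
$$ \textbf{C} = \textbf{A}\,\textbf{B} = \left( \begin{array}{ccc}
30 & 15 \\
34 & 9 \\
12 & 0 \end{array} \right) $$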
```
# Format: A/B, rowIndex, columnIndex1, Value1, columnIndex2, Value2,...
!echo 0, 0, 0, 5 > Matrics.txt
!echo 0, 1, 0, 3, 1, 8 >> Matrics.txt
!echo 0, 2, 1, 6 >> Matrics.txt
!echo 1, 0, 0, 6, 1, 3 >> Matrics.txt
!echo 1, 1, 0, 2 >> Matrics.txt
```
# MrJob class code
```
%%writefile MatrixMultiplication.py
#Version 1: One MapReduce Stage (join data at the first reducer)
from mrjob.job import MRJob
from mrjob.compat import jobconf_from_env
class MRMatrixAB(MRJob):
    # Emit all the data needed to calculate cell (i, j) of the result matrix
def mapper(self, _, line):
v = line.split(',')
        n = (len(v)-2)/2  # number of non-zero entries in this row
i = int(jobconf_from_env("row.num.A")) # we need to know how many rows of A
j = int(jobconf_from_env("col.num.B")) # we need to know how many columns of B
if v[0]=='0':
for p in range(n):
for q in range(j):
yield (int(v[1]),q), (int(v[p*2+2]),float(v[p*2+3]))
elif v[0]=='1':
for p in range(n):
for q in range(i):
yield (q,int(v[p*2+2])), (int(v[1]),float(v[p*2+3]))
# Sum up the product for cell i,j
def reducer(self, key, values):
idx_dict = {}
s = 0.0
for idx, value in values:
if str(idx) in idx_dict:
s = s + value * idx_dict[str(idx)]
else:
idx_dict[str(idx)] = value
yield key,s
if __name__ == '__main__':
MRMatrixAB.run()
!python MatrixMultiplication.py Matrics.txt --jobconf row.num.A=3 --jobconf col.num.B=2
```
# Driver:
```
from numpy import empty
from MatrixMultiplication import MRMatrixAB
mr_job = MRMatrixAB(args=['Matrics.txt','--jobconf', 'row.num.A=3','--jobconf', 'col.num.B=2'])
C =[]
CC = empty([3,2])
# Calculate A*B
print "Matrix C = A * B:"
with mr_job.make_runner() as runner:
runner.run()
for line in runner.stream_output():
key,value = mr_job.parse_output_line(line)
C.append((key,value))
CC[key[0],key[1]] = value
print key, value
print " "
print "Matrix C"
print CC
```
### 1901. Find a Peak Element II
#### Content
<p>A <strong>peak</strong> element in a 2D grid is an element that is <strong>strictly greater</strong> than all of its <strong>adjacent </strong>neighbors to the left, right, top, and bottom.</p>
<p>Given a <strong>0-indexed</strong> <code>m x n</code> matrix <code>mat</code> where <strong>no two adjacent cells are equal</strong>, find <strong>any</strong> peak element <code>mat[i][j]</code> and return <em>the length 2 array </em><code>[i,j]</code>.</p>
<p>You may assume that the entire matrix is surrounded by an <strong>outer perimeter</strong> with the value <code>-1</code> in each cell.</p>
<p>You must write an algorithm that runs in <code>O(m log(n))</code> or <code>O(n log(m))</code> time.</p>
<p> </p>
<p><strong>Example 1:</strong></p>
<p><img alt="" src="https://assets.leetcode.com/uploads/2021/06/08/1.png" style="width: 206px; height: 209px;" /></p>
<pre>
<strong>Input:</strong> mat = [[1,4],[3,2]]
<strong>Output:</strong> [0,1]
<strong>Explanation:</strong> Both 3 and 4 are peak elements so [1,0] and [0,1] are both acceptable answers.
</pre>
<p><strong>Example 2:</strong></p>
<p><strong><img alt="" src="https://assets.leetcode.com/uploads/2021/06/07/3.png" style="width: 254px; height: 257px;" /></strong></p>
<pre>
<strong>Input:</strong> mat = [[10,20,15],[21,30,14],[7,16,32]]
<strong>Output:</strong> [1,1]
<strong>Explanation:</strong> Both 30 and 32 are peak elements so [1,1] and [2,2] are both acceptable answers.
</pre>
<p> </p>
<p><strong>Constraints:</strong></p>
<ul>
<li><code>m == mat.length</code></li>
<li><code>n == mat[i].length</code></li>
<li><code>1 <= m, n <= 500</code></li>
<li><code>1 <= mat[i][j] <= 10<sup>5</sup></code></li>
<li>No two adjacent cells are equal.</li>
</ul>
#### Difficulty: Medium, AC rate: 59.9%
#### Question Tags:
- Array
- Binary Search
- Divide and Conquer
- Matrix
#### Links:
🎁 [Question Detail](https://leetcode.com/problems/find-a-peak-element-ii/description/) | 🎉 [Question Solution](https://leetcode.com/problems/find-a-peak-element-ii/solution/) | 💬 [Question Discussion](https://leetcode.com/problems/find-a-peak-element-ii/discuss/?orderBy=most_votes)
#### Hints:
<details><summary>Hint 0 🔍</summary>Let's assume that the width of the array is bigger than the height, otherwise, we will split in another direction.</details>
<details><summary>Hint 1 🔍</summary>Split the array into three parts: central column left side and right side.</details>
<details><summary>Hint 2 🔍</summary>Go through the central column and two neighbor columns and look for maximum.</details>
<details><summary>Hint 3 🔍</summary>If it's in the central column - this is our peak.</details>
<details><summary>Hint 4 🔍</summary>If it's on the left side, run this algorithm on subarray left_side + central_column.</details>
<details><summary>Hint 5 🔍</summary>If it's on the right side, run this algorithm on subarray right_side + central_column</details>
#### Sample Test Case
[[1,4],[3,2]]
---
What's your idea?
Similar to #162: binary search over the rows; within a row, find the maximum element in O(n), for a total time complexity of O(n * log(m)).
This is also explained in [MIT6.006](https://www.youtube.com/v/HtSuA80QTyo).
---
```
from typing import List
class Solution:
def findPeakGrid(self, mat: List[List[int]]) -> List[int]:
m = len(mat)
if m == 1:
max_j, _ = max(enumerate(mat[0]), key=lambda tup: tup[1])
return [0, max_j]
middle_row = m // 2
max_j, _ = max(enumerate(mat[middle_row]), key=lambda tup: tup[1])
if middle_row > 0 and mat[middle_row-1][max_j] > mat[middle_row][max_j]:
return self.findPeakGrid(mat[:middle_row])
elif middle_row < m - 1 and mat[middle_row+1][max_j] > mat[middle_row][max_j]:
result = self.findPeakGrid(mat[middle_row+1:])
return [sum(p) for p in zip(result, [middle_row+1, 0])]
else:
return [middle_row, max_j]
s = Solution()
print(s.findPeakGrid([[1,2,3]]) == [0, 2])
r = s.findPeakGrid([[1,4],[3,2]])
print(r == [1, 0] or r == [0, 1])
r = s.findPeakGrid([[10,20,15],[21,30,14],[7,16,32]])
print(r == [1, 1] or r == [2, 2])
r = s.findPeakGrid([[47,30,35,8,25],[6,36,19,41,40],[24,37,13,46,5],[3,43,15,50,19],[6,15,7,25,18]])
print(r == [0, 2] or r == [3, 3])
import sys, os; sys.path.append(os.path.abspath('..'))
from submitter import submit
submit(1901)
```
```
import pandas as pd
import numpy as np
from numpy import mean
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn.model_selection import train_test_split, cross_val_score, RepeatedStratifiedKFold, GridSearchCV
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.ensemble import RandomForestClassifier,RandomForestRegressor
from sklearn.tree import DecisionTreeClassifier,DecisionTreeRegressor
from sklearn.datasets import make_classification
from imblearn.pipeline import Pipeline
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler
from sklearn.metrics import classification_report,accuracy_score
# read the data
dataset=pd.read_csv('framingham.csv')
dataset.head()
dataset.info()
dataset.describe()
# missing values
dataset.isnull().sum()
```
### EDA
- education: A categorical variable of the participant's education, with the levels: some high school (1), high school/GED (2), some college/vocational school (3), college (4)
```
sns.countplot(data=dataset,x='education')
```
- As we can see, most of the people have some high school (cat 1) or high school/GED (cat 2) education
- Few people have some college/vocational school (cat 3) education, and very few have a college degree (cat 4)
#### Now we will look into risk factors, which include each person's rate of smoking per day
```
sns.catplot(data=dataset,y='cigsPerDay',x='TenYearCHD',kind='bar')
```
- As we can see, the people who have TenYearCHD smoke more cigarettes per day than those who do not
#### Now we will see if there is any relation between age and CHD, using current smoker as the category
```
sns.boxplot(data=dataset,x='TenYearCHD',y='age',hue='currentSmoker')
plt.legend(bbox_to_anchor=(1.05,1),loc=2)
```
- As we can see, among people who are not current smokers, older people are more likely to develop CHD
- Also, people who are current smokers are more likely to develop CHD when compared to non-smokers
#### Relationship between age, prevalent stroke, and the ten year risk of developing CHD.
```
sns.boxplot(data=dataset,x='prevalentStroke',y='age',hue='TenYearCHD')
```
- Strokes are more prevalent in the older age group
#### Relationship between age, diabetes, and the ten year risk of developing CHD
```
sns.boxplot(data=dataset,x='diabetes',y='age',hue='TenYearCHD')
plt.legend(loc=4)
```
- The older age group is more diabetic than the younger age group
#### Now we look into cholesterol and ten-year CHD; higher cholesterol is associated with CHD
```
sns.boxplot(data=dataset,x='TenYearCHD',y='totChol')
plt.show()
```
- People with ten-year CHD risk have a higher cholesterol level than those who don't, although the difference is very small.
- Total cholesterol comprises LDL and HDL: LDL, or bad cholesterol, is said to increase the risk of CHD, whereas HDL, or good cholesterol, is likely to decrease the risk of CHD
#### We will take a look at both systolic and diastolic blood pressure, and visualize their relationship with ten-year CHD risk.
```
sns.catplot(data=dataset,x='TenYearCHD',y='sysBP',kind='bar')
sns.catplot(kind='bar',data=dataset,x='TenYearCHD',y='diaBP')
```
- People with TenYearCHD have higher systolic and diastolic blood pressure than the others
### Data Preprocessing
```
dataset.isnull().sum()
# drop the null values
dataset=dataset.dropna()
```
The model to classify the ten year risk of CHD needs to perform better than the baseline. A baseline model is a model that classifies everything into the majority class.
```
dataset['TenYearCHD'].value_counts()
3101/(557+3101)
```
- Our model accuracy should be higher than the baseline accuracy (about 85%)
### Train Test split
```
X=dataset.drop('TenYearCHD',axis=1)
y=dataset['TenYearCHD']
X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=69,test_size=0.33)
```
### Over and Under-sampling
The data set is unbalanced, and more than 80% of the data is from participants who don't have a ten-year CHD risk. To overcome this, I did both oversampling and undersampling. Then, I created a pipeline for a decision tree classifier.
```
oversample=RandomOverSampler(sampling_strategy='minority')
X_over,y_over=oversample.fit_resample(X,y)
X_train,X_test,y_train,y_test=train_test_split(X_over,y_over,random_state=69,test_size=0.33)
steps=[('under',RandomUnderSampler()),('model',DecisionTreeClassifier())]
pipeline=Pipeline(steps=steps)
pipeline.fit(X_train,y_train)
pred=pipeline.predict(X_test)
```
### Evaluating the model
```
print(classification_report(y_test,pred))
accuracy_score(y_test,pred)
```
The model has high precision and recall for both outcomes, and has an accuracy of 0.90, which beats the baseline.
```
# default_exp exp.metrics_java
# export
import pandas as pd
import os
import shutil
from subprocess import *
# export
import logging
logger = logging.getLogger()
fhandler = logging.FileHandler(filename='mylog.log', mode='a')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fhandler.setFormatter(formatter)
logger.addHandler(fhandler)
logger.setLevel(logging.INFO)
```
## metrics_java
> This module provides a tool for computing metrics (from static analysis) for Java source code using the <a href="https://github.com/mauricioaniche/ck">CK Package</a>
> @Alvaro 26 Jan 2021
Using <a href="https://github.com/mauricioaniche/ck">CK Package</a>
CK is a Java package (jar) which is executed from the terminal. It requires the code that is going to be analyzed to be located in <i>physical</i> files. For that reason, the dataset is used to produce some <i>.java</i> files.
## Available metrics
#### Note: Further info. can be found at the github repository of the project.
Structural & complexity
- wmc: Weight Method Class or McCabe's complexity
- loc: Lines of code
Complexity-related
- returnQty: Number of return instructions
- loopQty: Number of loops (i.e., for, while, do while, enhanced for).
- comparisonsQty: Number of comparisons (i.e., == and !=)
- tryCatchQty: Number of try/catch blocks
- parenthesizedExpsQty: The number of expressions inside parenthesis
- nosi: number of invocations to static methods. It can only count the ones that can be resolved by the JDT.
- assignmentsQty
- mathOperationsQty: The number of math operations (times, divide, remainder, plus, minus, left shit, right shift).
- variablesQty: Number of declared variables
- maxNestedBlocksQty: The highest number of blocks nested together.
Literals
- stringLiteralsQty: Number of string literals
- numbersQty: Number of numeric literals
Number of methods: Count the number of methods, both total (totalMethodsQty) and specific (i.e., static, public, abstract, private, protected, default, final, and synchronized)
- totalMethodsQty:
- staticMethodsQty
- publicMethodsQty
- privateMethodsQty
- protectedMethodsQty
- defaultMethodsQty
- abstractMethodsQty
- finalMethodsQty
- synchronizedMethodsQty
Number of fields: Count the number of fields, both total (totalFieldsQty) and specific (i.e., static, public, private, protected, default, final, and synchronized)
- totalFieldsQty
- staticFieldsQty
- publicFieldsQty
- privateFieldsQty
- protectedFieldsQty
- defaultFieldsQty
- visibleFieldsQty
- finalFieldsQty
- synchronizedFieldsQty
Classes
- anonymousClassesQty: Number of anonymous classes
- innerClassesQty: Number of inner classes
- lambdasQty: Number of lambda expressions
Independent
- uniqueWordsQty: Number of unique words in the source code. At method level, it only uses the method body as input. At class level, it uses the entire body of the class as input. The algorithm basically counts the number of words in a method/class, after removing Java keywords.
- modifiers: public/abstract/private/protected/native modifiers of classes/methods
Each record corresponds to an individual class. When working with method-level snippets, "artificial" classes are created for performing the analysis.
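As a small illustration of that wrapping (the snippet and class name below are made up for the example; the template string is the same one used by `write_dataset_to_files` in the next cell):
```
# Hypothetical example: a method-level snippet wrapped into an "artificial" class
snippet = 'public int add(int a, int b) { return a + b; }'
java_template = 'public class <class_name>{\n <code_snippet>\n}'
wrapped = java_template.replace('<class_name>', 'ClassRecord0').replace('<code_snippet>', snippet)
print(wrapped)
# public class ClassRecord0{
#  public int add(int a, int b) { return a + b; }
# }
```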
```
#Utils method
# export
def write_dataset_to_files(df_series, destination_path):
"""
Function to generate .java files.
Params:
# df_series: Pandas Series (DataFrame column) with the source code records.
# destination_path: (str) Absolute path to be used as directory for the generated files.
Returns:
Collection of paths for the corresponding java files.
"""
java_template = 'public class <class_name>{\n <code_snippet>\n}'
if not os.path.exists(destination_path):
logging.info('Creating directory.')
os.mkdir(destination_path)
logging.info("Generating physical .java files.")
file_paths = []
for idx, value in df_series.iteritems():
class_name = f'ClassRecord{idx}'
code = java_template.replace('<class_name>', class_name)
code = code.replace('<code_snippet>', value)
file_path = f'{destination_path}/{class_name}.java'
with open(file_path, 'w') as file:
file.write(code)
file_paths.append(file_path)
return file_paths
```
Execute the <i>jar</i> file from Python and get the output
```
# export
def jarWrapper(*args):
    # Run the jar with the given arguments and collect its stdout/stderr lines
    process = Popen(['java', '-jar'] + list(args), stdout=PIPE, stderr=PIPE)
    ret = []
    while process.poll() is None:
        line = process.stdout.readline()
        if line != b'' and line.endswith(b'\n'):
            ret.append(line[:-1])
    stdout, stderr = process.communicate()
    ret += stdout.split(b'\n')
    if stderr != b'':
        ret += stderr.split(b'\n')
    if b'' in ret:
        ret.remove(b'')
    return ret
# Execution example
args = ['/tf/main/tools/ck_metrics_tool/ck-metrics.jar', '/tf/main/nbs/test_data/test_metrics', 'false', '0', 'True'] # Any number of args to be passed to the jar file
result = jarWrapper(*args)
print(f'Result: {result}')
!pwd
# export
class JavaAnalyzer():
"""
    Class to compute CK source-code metrics for Java snippets.
"""
def __init__(self, ck_jar_path):
self.ck_jar_path = ck_jar_path
def compute_metrics(self, df_series, files_destination_path, remove_java_files=True):
"""
Computes metrics for a pandas series of java source code snippets
Params
# df_series: Pandas series (df column) containing java source snippets
# files_destination_path: Path indicating where the physical .java files are going to be created (for metrics computation)
Returns:
Pandas Dataframe containing metrics
"""
file_paths = write_dataset_to_files(df_series, files_destination_path)
self.__call_ck_package(files_destination_path)
metrics_df = self.__get_metrics_df()
self.__remove_csv_files()
if remove_java_files:
self.__remove_tmp_java_files(file_paths)
return metrics_df
def __call_ck_package(self, files_path):
"""
Performs call to external .jar package.
"""
args = [self.ck_jar_path, files_path, 'false', '0', 'True']
result = jarWrapper(*args)
logging.info(f'CK package produced this output:\n{result}')
def __get_metrics_df(self):
"""
Reads report files (csv) generated by the CK package.
Returns:
Pandas Dataframe containing appropriate metrics
"""
class_metrics_df = pd.read_csv('class.csv')
# method_metrics_df = pd.read_csv('method.csv')
# merged_df = pd.merge(left = class_metrics_df, right = method_metrics_df, left_on='file', right_on='file')
appropriate_columns = ['file','class', 'wmc', 'totalMethodsQty', 'staticMethodsQty', 'publicMethodsQty', 'privateMethodsQty',
'protectedMethodsQty', 'defaultMethodsQty', 'abstractMethodsQty', 'finalMethodsQty','synchronizedMethodsQty',
'totalFieldsQty', 'staticFieldsQty', 'publicFieldsQty', 'privateFieldsQty', 'protectedFieldsQty',
'defaultFieldsQty', 'visibleFieldsQty', 'finalFieldsQty', 'synchronizedFieldsQty',
'nosi', 'loc', 'returnQty', 'loopQty', 'comparisonsQty', 'tryCatchQty', 'parenthesizedExpsQty',
'stringLiteralsQty', 'numbersQty', 'assignmentsQty', 'mathOperationsQty', 'variablesQty', 'maxNestedBlocksQty',
'anonymousClassesQty', 'innerClassesQty', 'lambdasQty', 'uniqueWordsQty', 'modifiers']
class_metrics_df = class_metrics_df[appropriate_columns]
return class_metrics_df
def __remove_csv_files(self):
"""
Removes files generated by CK package.
"""
if os.path.exists('class.csv'):
os.remove('class.csv')
if os.path.exists('method.csv'):
os.remove('method.csv')
if os.path.exists('field.csv'):
os.remove('field.csv')
def __remove_tmp_java_files(self, paths):
"""
Removes the temporary generated java files.
"""
for file_path in paths:
os.remove(file_path)
# General parameters for testing
def get_default_params():
return {
'ck_jar_path': '/tf/main/tools/ck_metrics_tool/ck-metrics.jar',
'search_net_ds_path': '/tf/main/dvc-ds4se/code/searchnet/clean_java.csv',
'sampling_size': 100,
'physical_files_path': '/tf/main/nbs/test_data/test_metrics'
}
```
## Testing JavaAnalyzer
Explore with some data
```
params = get_default_params()
java_analyzer = JavaAnalyzer(params['ck_jar_path'])
java_df = pd.read_csv(params['search_net_ds_path'])
java_df.head()
samples = java_df.sample(params['sampling_size'])
paths = write_dataset_to_files(samples['code'], params['physical_files_path'])
java_metrics = java_analyzer.compute_metrics(samples['code'], params['physical_files_path'], remove_java_files=False)
#show metrics dataframe
java_metrics.head()
samples.shape
java_metrics.shape
samples.loc[125182]
java_metrics.loc[java_metrics['class'] == 'ClassRecord385189']
java_metrics
print(f'Metrics dataframe columns:\n {java_metrics.columns}')
```
It is important to remark that each snippet in the dataset, is "transformed" into a class (including a <i>physical</i> .java file) to get the metrics
### Establish categories
```
# export
metrics_categories = {
"Structural": ['wmc', 'loc'],
"Complexity": ['returnQty', 'loopQty', 'comparisonsQty',
'tryCatchQty', 'parenthesizedExpsQty', 'nosi',
'assignmentsQty', 'mathOperationsQty', 'variablesQty',
'maxNestedBlocksQty'
],
"Literals": ['stringLiteralsQty', 'numbersQty'],
"Methods": ['totalMethodsQty', 'staticMethodsQty', 'publicMethodsQty',
'privateMethodsQty', 'protectedMethodsQty', 'defaultMethodsQty',
'abstractMethodsQty', 'finalMethodsQty', 'synchronizedMethodsQty'
],
"Fields": ['totalFieldsQty', 'staticFieldsQty', 'publicFieldsQty',
'privateFieldsQty', 'protectedFieldsQty', 'defaultFieldsQty',
'visibleFieldsQty', 'finalFieldsQty','synchronizedFieldsQty'
],
"Indepentent": ['uniqueWordsQty', 'modifiers'],
"Classes": ['anonymousClassesQty', 'innerClassesQty', 'lambdasQty']
}
```
Compute euclidean distance by categories
```
# export
from typing import Dict, Optional  # required by the type hints below
import numpy as np  # required for np.linalg.norm

def compute_categorized_euclidean_dist(errors_df1: pd.DataFrame, errors_df2: pd.DataFrame,
                                       stat: Optional[str]='mean') -> Dict:
comparisons = { }
for category in metrics_categories:
df1 = errors_df1[metrics_categories[category]]
df2 = errors_df2[metrics_categories[category]]
stats1 = df1.describe().loc[stat].values
stats2 = df2.describe().loc[stat].values
comparisons[category] = np.linalg.norm(stats1 - stats2)
return comparisons
from nbdev.export import notebook2script
notebook2script()
```
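A quick usage sketch (hypothetical variable names; it assumes `metrics_a` and `metrics_b` are two metrics DataFrames produced by `compute_metrics`, for example from two different samples):
```
# Hypothetical: compare the mean metric profile of two snippet groups, per category
distances = compute_categorized_euclidean_dist(metrics_a, metrics_b, stat='mean')
for category, dist in distances.items():
    print(f'{category}: {dist:.3f}')
```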
# Lists
Earlier when discussing strings we introduced the concept of a *sequence* in Python. Lists can be thought of as the most general version of a *sequence* in Python. Unlike strings, they are mutable, meaning the elements inside a list can be changed!
In this section we will learn about:
1.) Creating lists
2.) Indexing and Slicing Lists
3.) Basic List Methods
4.) Nesting Lists
5.) Introduction to List Comprehensions
Lists are constructed with brackets [] and commas separating every element in the list.
Let's go ahead and see how we can construct lists!
```
# Assign a list to a variable named my_list
my_list = [1,2,3]
```
We just created a list of integers, but lists can actually hold different object types. For example:
```
my_list = ['A string',23,100.232,'o']
```
Just like strings, the len() function will tell you how many items are in the sequence of the list.
```
len(my_list)
```
### Indexing and Slicing
Indexing and slicing work just like in strings. Let's make a new list to remind ourselves of how this works:
```
my_list = ['one','two','three',4,5]
# Grab element at index 0
my_list[0]
# Grab index 1 and everything past it
my_list[1:]
# Grab everything UP TO index 3
my_list[:3]
```
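Because lists are mutable, we can also change an element in place by assigning to its index. A quick sketch using a throwaway list (so the lists used later are untouched):

```
# Lists are mutable: assigning to an index changes the element in place
mutable_demo = ['one','two','three']
mutable_demo[0] = 'ONE'
mutable_demo
```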
We can also use + to concatenate lists, just like we did for strings.
```
my_list + ['new item']
```
Note: This doesn't actually change the original list!
```
my_list
```
You would have to reassign the list to make the change permanent.
```
# Reassign
my_list = my_list + ['add new item permanently']
my_list
```
We can also use the * for a duplication method similar to strings:
```
# Make the list double
my_list * 2
# Again doubling not permanent
my_list
```
## Basic List Methods
If you are familiar with another programming language, you might start to draw parallels between arrays in another language and lists in Python. Lists in Python, however, tend to be more flexible than arrays in other languages for two good reasons: they have no fixed size (meaning we don't have to specify how big a list will be), and they have no fixed type constraint (like we've seen above).
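As a quick illustration of both points, here is a small sketch with a throwaway list:

```
# No fixed size: we can keep growing the list...
flexible = []
flexible.append(1)
# ...and no fixed type constraint: an int, a string and a float can live together
flexible.append('two')
flexible.append(3.0)
flexible
```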
Let's go ahead and explore some more special methods for lists:
```
# Create a new list
list1 = [1,2,3]
```
Use the **append** method to permanently add an item to the end of a list:
```
# Append
list1.append('append me!')
# Show
list1
```
Use **pop** to "pop off" an item from the list. By default pop takes off the last index, but you can also specify which index to pop off. Let's see an example:
```
# Pop off the 0 indexed item
list1.pop(0)
# Show
list1
# Assign the popped element, remember default popped index is -1
popped_item = list1.pop()
popped_item
# Show remaining list
list1
```
It should also be noted that list indexing will return an error if there is no element at that index. For example:
```
list1[100]
```
We can use the **sort** and **reverse** methods to affect our lists:
```
new_list = ['a','e','x','b','c']
#Show
new_list
# Use reverse to reverse order (this is permanent!)
new_list.reverse()
new_list
# Use sort to sort the list (in this case alphabetical order, but for numbers it will go ascending)
new_list.sort()
new_list
```
## Nesting Lists
A great feature of Python data structures is that they support *nesting*. This means we can have data structures within data structures. For example: a list inside a list.
Let's see how this works!
```
# Let's make three lists
lst_1=[1,2,3]
lst_2=[4,5,6]
lst_3=[7,8,9]
# Make a list of lists to form a matrix
matrix = [lst_1,lst_2,lst_3]
# Show
matrix
```
We can again use indexing to grab elements, but now there are two levels for the index. The items in the matrix object, and then the items inside that list!
```
# Grab first item in matrix object
matrix[0]
# Grab first item of the first item in the matrix object
matrix[0][0]
```
# List Comprehensions
Python has an advanced feature called list comprehensions. They allow for quick construction of lists. To fully understand list comprehensions we need to understand for loops. So don't worry if you don't completely understand this section, and feel free to just skip it since we will return to this topic later.
But in case you want to know now, here are a few examples!
```
# Build a list comprehension by deconstructing a for loop within a []
first_col = [row[0] for row in matrix]
first_col
```
We used a list comprehension here to grab the first element of every row in the matrix object. We will cover this in much more detail later on!
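As one more small sketch of the same pattern, here is a comprehension that builds a list of squares:

```
# The expression before 'for' is applied to every item produced by the loop
squares = [x**2 for x in range(5)]
squares
```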
For more advanced methods and features of lists in Python, check out the Advanced Lists section later on in this course!
|
github_jupyter
|
# Assign a list to an variable named my_list
my_list = [1,2,3]
my_list = ['A string',23,100.232,'o']
len(my_list)
my_list = ['one','two','three',4,5]
# Grab element at index 0
my_list[0]
# Grab index 1 and everything past it
my_list[1:]
# Grab everything UP TO index 3
my_list[:3]
my_list + ['new item']
my_list
# Reassign
my_list = my_list + ['add new item permanently']
my_list
# Make the list double
my_list * 2
# Again doubling not permanent
my_list
# Create a new list
list1 = [1,2,3]
# Append
list1.append('append me!')
# Show
list1
# Pop off the 0 indexed item
list1.pop(0)
# Show
list1
# Assign the popped element, remember default popped index is -1
popped_item = list1.pop()
popped_item
# Show remaining list
list1
list1[100]
new_list = ['a','e','x','b','c']
#Show
new_list
# Use reverse to reverse order (this is permanent!)
new_list.reverse()
new_list
# Use sort to sort the list (in this case alphabetical order, but for numbers it will go ascending)
new_list.sort()
new_list
# Let's make three lists
lst_1=[1,2,3]
lst_2=[4,5,6]
lst_3=[7,8,9]
# Make a list of lists to form a matrix
matrix = [lst_1,lst_2,lst_3]
# Show
matrix
# Grab first item in matrix object
matrix[0]
# Grab first item of the first item in the matrix object
matrix[0][0]
# Build a list comprehension by deconstructing a for loop within a []
first_col = [row[0] for row in matrix]
first_col
| 0.19544 | 0.979784 |
##Module 6.5: Dueling DQN / Advantage Network
We implement a dueling - or advantage - (D)DQN based reinforcement learning system with experience replay to control the cart-pole environment from OpenAI Gym.
We will also render and record our environment so we can play back videos of how our system is controlling it.
In this module we will pay attention to:
- Examining the value/advantage architecture of the DQN.
We will also set up our class so we can choose to have a DQN or DDQN setup.
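As a reminder of the idea behind the dueling architecture, the network estimates a state value V(s) and per-action advantages A(s, a), and combines them as Q(s, a) = V(s) + (A(s, a) - mean of A(s, ·)). The following is just a minimal NumPy sketch of that combination step with made-up numbers, separate from the Keras model built below:

```
import numpy as np

value = np.array([[1.5]])            # V(s) for a single state
advantage = np.array([[0.4, -0.2]])  # A(s, a) for two actions
q_values = value + (advantage - advantage.mean(axis=1, keepdims=True))
print(q_values)                      # [[1.8 1.2]]
```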
Note that we will not spend time tuning hyper-parameters: The purpose is to show how different techniques can be implemented in Keras, not to solve particular data science problems as optimally as possible. Obviously, most techniques include hyper-parameters that need to be tuned for optimal performance.
We will need to install some additional libraries in order to capture and play back video of our reinforcement learning system controlling the environment. We install the libraries now.
```
#remove " > /dev/null 2>&1" to see what is going on under the hood
!pip install gym pyvirtualdisplay > /dev/null 2>&1
!apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1
```
And here we set up the display for video playback. Don't worry if you see a warning.
Displaying interactive graphics is a headache in Colab, which is why we choose to capture and play back video. If this does not work for you, you can run the code without the video components.
```
from IPython import display as ipythondisplay
from IPython.display import HTML
from pyvirtualdisplay import Display
display = Display(visible=0, size=(1400, 900))
display.start()
```
Now we set up the cart-pole environments using OpenAI Gym.
```
from gym.wrappers import TimeLimit
from gym.envs.classic_control import CartPoleEnv
env = TimeLimit(CartPoleEnv(),max_episode_steps=251)
env_long = TimeLimit(CartPoleEnv(),max_episode_steps=5001)
```
Now we set up our reinforcement learning control system. We will use a class for this.
```
# Import required libraries
import numpy as np
import matplotlib.pyplot as plt
import random
import time
import glob
import io
import base64
from keras.optimizers import Adam
from keras.layers import Input, Dense, Lambda
from keras.layers.merge import Add
from keras.models import Model
import keras.backend as K
from gym.wrappers import Monitor
# Our Dueling (D)DQN Reinforcement Learning Class
class Dueling_DDQN_Brain:
# Lots of parameters we can initialize
def __init__ (
self,
epsilon_start=.9,
epsilon_stop=.1,
epsilon_decay_episodes=500,
gamma=.9,
use_DDQN=True, # Here we specify if we use a double network
update_period=20,
memory_max=10000,
batch_size=128,
state_size=4,
action_size=2,
hidden_size=256,
learning_rate=.01
):
self.steps=0
self.qNet=self.makeDQN(state_size, action_size, hidden_size,learning_rate)
self.use_DDQN=use_DDQN
# We make the DDQN anyway - we might change our mind about having one
self.dqNet=self.makeDQN(state_size, action_size, hidden_size,learning_rate)
self.update_weights()
self.epsilon=epsilon_start
self.epsilon_stop=epsilon_stop
self.epsilon_decay = self.epsilon_stop / self.epsilon
self.epsilon_decay = self.epsilon_decay ** (1. / float(epsilon_decay_episodes))
self.gamma=gamma
self.memory_max=memory_max
self.memory=[]
self.batch_size=batch_size
self.replay_count=0
self.update_period=update_period
# THIS FUNCTION HAS BEEN CHANGED
# Here we create the DQN we will use
# This will have a state-value/action-advantage structure
def makeDQN(self,state_size, action_size, hidden_size,learning_rate):
# Standard start to the architecture
inputs = Input(shape=(state_size,))
h1 = Dense(hidden_size, activation='relu')(inputs)
h2 = Dense(int(hidden_size/2), activation='relu')(h1)
# Network splits into value and advantage branches
# Value branch
value_dense = Dense(1)(h2)
value = Lambda(lambda s: K.expand_dims(s[:, 0], -1), output_shape=(action_size,))(value_dense)
# Advantage branch
advantage_dense = Dense(action_size)(h2)
advantage = Lambda(lambda a: a[:, :] - K.mean(a[:, :], keepdims=True),
output_shape=(action_size,))(advantage_dense)
# Sum Value and Advantages to get Q-values
output = Add()([value, advantage])
model = Model(input=inputs, output=output)
model.summary()
model.compile(loss='mse', optimizer=Adam(lr=learning_rate))
return model
# This function decays epsilon according to our parameters
def update_epsilon(self):
if self.epsilon > self.epsilon_stop:
self.epsilon *= self.epsilon_decay
# Here we copy the weight values of qNet into dqNet
def update_weights(self):
self.dqNet.set_weights(self.qNet.get_weights())
# We make a decision based on which of our estimated
# Q-values is highest for a given state
def decide (self, state):
Qs = self.qNet.predict(state)
return np.argmax(Qs)
# We store a SARS datum in memory.
def add_to_memory (self,state,action,reward,next_state,done):
if len(self.memory)==self.memory_max:
self.memory.pop(0)
self.memory.append((state,action,reward,next_state,done))
# THIS FUNCTION HAS BEEN CHANGED
# This is where we get the q-value for
# a state for training purposes.
def get_target_q_value (self,next_state,reward,done):
if done:
return reward
# CHANGE:
# We use DDQN or DQN approach depending on setup
elif self.use_DDQN:
# DDQN
# We use qNet to determine which action to take in
# the next state.
action = np.argmax(self.qNet.predict(next_state)[0])
# But then we use dqNet not qNet to predict q_value
# of this action being taken in the next state
q_value = self.dqNet.predict(next_state)[0,action]
return reward + self.gamma * q_value
else:
# DQN
q_value = np.amax(self.qNet.predict(next_state)[0])
return reward + self.gamma * q_value
# THIS FUNCTION HAS BEEN CHANGED
# This is where we fetch data from memory and prepare it
# as a training batch
def fetch_batch_from_memory (self):
# Take a random sample of data store in memory
memory_sample = random.sample(self.memory, min(self.batch_size,len(self.memory)))
# Construct training batch from memory store
states = np.zeros((self.batch_size,4))
q_values = np.zeros((self.batch_size,2))
i=0
for state, action, reward, next_state, done in memory_sample:
# CHANGE:
# We use DDQN or DQN approach depending on setup
if self.use_DDQN:
# DDQN
# Get q-values for state using dqNet, not qNet
q_values[i] = self.dqNet.predict(state)[0]
else:
# DQN
# Get q-values for state using qNet
q_values[i] = self.qNet.predict(state)[0]
# Get current estimate of action's q-value
q_values[i,action] = self.get_target_q_value(next_state, reward, done)
# Add state and q-values to batch
states[i]=state[0]
i+=1
return states,q_values
# This is where we train the network, by 'replaying'
# data stored in memory
def replay (self):
        # We only start training once there is at least enough
# data in memory to create a full batch
if self.batch_size <= len(self.memory):
# We prepare the input and target batches
inputs,targets = self.fetch_batch_from_memory()
# We adjust the qNet network parameters
self.qNet.fit(inputs,targets,batch_size=self.batch_size,epochs=1,verbose=0)
# We decay epsilon leading us to exploit more and explore less.
self.update_epsilon()
# If using DDQN, then periodically update dqNet
self.replay_count+=1
if self.use_DDQN and self.replay_count > 0 and self.replay_count % self.update_period == 0:
self.update_weights()
# Here we determine what action we should take in a given state.
# Notice that we only explore when training.
def act (self,env,state,train):
# Determine if we should explore
if train and np.random.rand() <= self.epsilon:
# Make a random action
return env.action_space.sample()
# Get action from Q-estimates
return self.decide(state)
# This runs a complete episode
def control_episode (self,env,train,render=False):
# We will track total reward over the episode
total_reward=0
# Start new episode
env.reset()
# Initial random move to get pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
state = np.reshape(state, [1, 4])
# The control loop for taking steps
done = False
while not done:
# If we want, we render the environment (create an image)
if render:
env.render()
# Determine action
action = self.act(env,state,train)
# Take action, get new state and reward
next_state, reward, done, _ = env.step(action)
next_state = np.reshape(next_state, [1, 4])
# CHANGE
# Add information to memory AND train using replay
# Note we have moved training to every turn!
if train:
self.add_to_memory(state,action,reward,next_state,done)
self.replay()
# Update local variables
state=next_state
total_reward += reward
# the episode has ended, so record result
return total_reward
# Here we control multiple episodes
def control (self,env,num_episodes,max_steps,train,report_every=100):
# We track the total rewards in each episode
final_rewards=np.zeros(num_episodes)
# We will also time how long things are taking.
time_start=time.clock()
# We run the desired number of episodes
for ep in range(num_episodes):
final_rewards[ep]=self.control_episode(env,train,False)
# And periodically we print out a report of how we are performing
if ep!=0 and ep % report_every == 0:
time_end=time.clock()
mean_reward=np.sum(final_rewards[ep-report_every:ep+1])/report_every
print('Episode: {}'.format(ep),
'Recent Average Rewards: {}'.format(mean_reward),
'Epsilon: {:.4f}'.format(self.epsilon),
'Time: {:.2f}'.format(time_end-time_start))
time_start=time_end
# We classify the problem as solved if we get an average
# reward of over 190 during a reporting period.
# This is actually a little low - the system will learn
# to balance the pole, but not to keep the cart centered.
if mean_reward>190:
print("Problem is considered solved!!!")
return final_rewards
print("Problem was not solved.")
return final_rewards
# This will record and show a video of an episode
# (if the video display code is run)
def display_episode(self,env):
wrapped_env=wrap_env(env)
self.control_episode(wrapped_env,False,True)
wrapped_env.close()
show_video()
# This is a function used in showing videos
def show_video():
mp4list = glob.glob('video/*.mp4')
if len(mp4list) > 0:
mp4 = mp4list[0]
video = io.open(mp4, 'r+b').read()
encoded = base64.b64encode(video)
ipythondisplay.display(HTML(data='''<video alt="test" autoplay
loop controls style="height: 400px;">
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii'))))
else:
print("Could not find video")
# This is a function used when recording videos
def wrap_env(env):
env = Monitor(env, './video', force=True)
return env
dueling_dqn_brain=Dueling_DDQN_Brain(use_DDQN=False,hidden_size=24,learning_rate=.001,gamma=.99,batch_size=12,epsilon_decay_episodes=1000,epsilon_stop=.01)
dueling_dqn_brain.display_episode(env_long)
dqn_R=dueling_dqn_brain.control(env,5000,200,True,50)
dueling_dqn_brain.display_episode(env_long)
dueling_ddqn_brain=Dueling_DDQN_Brain(hidden_size=24,learning_rate=.001,gamma=.99,batch_size=12,epsilon_decay_episodes=1000,epsilon_stop=.01)
ddqn_R=dueling_ddqn_brain.control(env,5000,200,True,20)
dueling_ddqn_brain.display_episode(env_long)
display.stop()
```
|
github_jupyter
|
#remove " > /dev/null 2>&1" to see what is going on under the hood
!pip install gym pyvirtualdisplay > /dev/null 2>&1
!apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1
from IPython import display as ipythondisplay
from IPython.display import HTML
from pyvirtualdisplay import Display
display = Display(visible=0, size=(1400, 900))
display.start()
from gym.wrappers import TimeLimit
from gym.envs.classic_control import CartPoleEnv
env = TimeLimit(CartPoleEnv(),max_episode_steps=251)
env_long = TimeLimit(CartPoleEnv(),max_episode_steps=5001)
# Import required libraries
import numpy as np
import matplotlib.pyplot as plt
import random
import time
import glob
import io
import base64
from keras.optimizers import Adam
from keras.layers import Input, Dense, Lambda
from keras.layers.merge import Add
from keras.models import Model
import keras.backend as K
from gym.wrappers import Monitor
# Our Dueling (D)DQN Reinforcement Learning Class
class Dueling_DDQN_Brain:
# Lots of parameters we can initialize
def __init__ (
self,
epsilon_start=.9,
epsilon_stop=.1,
epsilon_decay_episodes=500,
gamma=.9,
use_DDQN=True, # Here we specify if we use a double network
update_period=20,
memory_max=10000,
batch_size=128,
state_size=4,
action_size=2,
hidden_size=256,
learning_rate=.01
):
self.steps=0
self.qNet=self.makeDQN(state_size, action_size, hidden_size,learning_rate)
self.use_DDQN=use_DDQN
# We make the DDQN anyway - we might change our mind about having one
self.dqNet=self.makeDQN(state_size, action_size, hidden_size,learning_rate)
self.update_weights()
self.epsilon=epsilon_start
self.epsilon_stop=epsilon_stop
self.epsilon_decay = self.epsilon_stop / self.epsilon
self.epsilon_decay = self.epsilon_decay ** (1. / float(epsilon_decay_episodes))
self.gamma=gamma
self.memory_max=memory_max
self.memory=[]
self.batch_size=batch_size
self.replay_count=0
self.update_period=update_period
# THIS FUNCTION HAS BEEN CHANGED
# Here we create the DQN we will use
# This will have a state-value/action-advantage structure
def makeDQN(self,state_size, action_size, hidden_size,learning_rate):
# Standard start to the architecture
inputs = Input(shape=(state_size,))
h1 = Dense(hidden_size, activation='relu')(inputs)
h2 = Dense(int(hidden_size/2), activation='relu')(h1)
# Network splits into value and advantage branches
# Value branch
value_dense = Dense(1)(h2)
value = Lambda(lambda s: K.expand_dims(s[:, 0], -1), output_shape=(action_size,))(value_dense)
# Advantage branch
advantage_dense = Dense(action_size)(h2)
advantage = Lambda(lambda a: a[:, :] - K.mean(a[:, :], keepdims=True),
output_shape=(action_size,))(advantage_dense)
# Sum Value and Advantages to get Q-values
output = Add()([value, advantage])
model = Model(input=inputs, output=output)
model.summary()
model.compile(loss='mse', optimizer=Adam(lr=learning_rate))
return model
# This function decays epsilon according to our parameters
def update_epsilon(self):
if self.epsilon > self.epsilon_stop:
self.epsilon *= self.epsilon_decay
# Here we copy the weight values of qNet into dqNet
def update_weights(self):
self.dqNet.set_weights(self.qNet.get_weights())
# We make a decision based on which of our estimated
# Q-values is highest for a given state
def decide (self, state):
Qs = self.qNet.predict(state)
return np.argmax(Qs)
# We store a SARS datum in memory.
def add_to_memory (self,state,action,reward,next_state,done):
if len(self.memory)==self.memory_max:
self.memory.pop(0)
self.memory.append((state,action,reward,next_state,done))
# THIS FUNCTION HAS BEEN CHANGED
# This is where we get the q-value for
# a state for training purposes.
def get_target_q_value (self,next_state,reward,done):
if done:
return reward
# CHANGE:
# We use DDQN or DQN approach depending on setup
elif self.use_DDQN:
# DDQN
# We use qNet to determine which action to take in
# the next state.
action = np.argmax(self.qNet.predict(next_state)[0])
# But then we use dqNet not qNet to predict q_value
# of this action being taken in the next state
q_value = self.dqNet.predict(next_state)[0,action]
return reward + self.gamma * q_value
else:
# DQN
q_value = np.amax(self.qNet.predict(next_state)[0])
return reward + self.gamma * q_value
# THIS FUNCTION HAS BEEN CHANGED
# This is where we fetch data from memory and prepare it
# as a training batch
def fetch_batch_from_memory (self):
# Take a random sample of data store in memory
memory_sample = random.sample(self.memory, min(self.batch_size,len(self.memory)))
# Construct training batch from memory store
states = np.zeros((self.batch_size,4))
q_values = np.zeros((self.batch_size,2))
i=0
for state, action, reward, next_state, done in memory_sample:
# CHANGE:
# We use DDQN or DQN approach depending on setup
if self.use_DDQN:
# DDQN
# Get q-values for state using dqNet, not qNet
q_values[i] = self.dqNet.predict(state)[0]
else:
# DQN
# Get q-values for state using qNet
q_values[i] = self.qNet.predict(state)[0]
# Get current estimate of action's q-value
q_values[i,action] = self.get_target_q_value(next_state, reward, done)
# Add state and q-values to batch
states[i]=state[0]
i+=1
return states,q_values
# This is where we train the network, by 'replaying'
# data stored in memory
def replay (self):
# We only start training once their is at least enough
# data in memory to create a full batch
if self.batch_size <= len(self.memory):
# We prepare the input and target batches
inputs,targets = self.fetch_batch_from_memory()
# We adjust the qNet network parameters
self.qNet.fit(inputs,targets,batch_size=self.batch_size,epochs=1,verbose=0)
# We decay epsilon leading us to exploit more and explore less.
self.update_epsilon()
# If using DDQN, then periodically update dqNet
self.replay_count+=1
if self.use_DDQN and self.replay_count > 0 and self.replay_count % self.update_period == 0:
self.update_weights()
# Here we determine what action we should take in a given state.
# Notice that we only explore when training.
def act (self,env,state,train):
# Determine if we should explore
if train and np.random.rand() <= self.epsilon:
# Make a random action
return env.action_space.sample()
# Get action from Q-estimates
return self.decide(state)
# This runs a complete episode
def control_episode (self,env,train,render=False):
# We will track total reward over the episode
total_reward=0
# Start new episode
env.reset()
# Initial random move to get pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())
state = np.reshape(state, [1, 4])
# The control loop for taking steps
done = False
while not done:
# If we want, we render the environment (create an image)
if render:
env.render()
# Determine action
action = self.act(env,state,train)
# Take action, get new state and reward
next_state, reward, done, _ = env.step(action)
next_state = np.reshape(next_state, [1, 4])
# CHANGE
# Add information to memory AND train using replay
# Note we have moved training to every turn!
if train:
self.add_to_memory(state,action,reward,next_state,done)
self.replay()
# Update local variables
state=next_state
total_reward += reward
# the episode has ended, so record result
return total_reward
# Here we control multiple episodes
def control (self,env,num_episodes,max_steps,train,report_every=100):
# We track the total rewards in each episode
final_rewards=np.zeros(num_episodes)
# We will also time how long things are taking.
time_start=time.clock()
# We run the desired number of episodes
for ep in range(num_episodes):
final_rewards[ep]=self.control_episode(env,train,False)
# And periodically we print out a report of how we are performing
if ep!=0 and ep % report_every == 0:
time_end=time.clock()
mean_reward=np.sum(final_rewards[ep-report_every:ep+1])/report_every
print('Episode: {}'.format(ep),
'Recent Average Rewards: {}'.format(mean_reward),
'Epsilon: {:.4f}'.format(self.epsilon),
'Time: {:.2f}'.format(time_end-time_start))
time_start=time_end
# We classify the problem as solved if we get an average
# reward of over 190 during a reporting period.
# This is actually a little low - the system will learn
# to balance the pole, but not to keep the cart centered.
if mean_reward>190:
print("Problem is considered solved!!!")
return final_rewards
print("Problem was not solved.")
return final_rewards
# This will record and show a video of an episode
# (if the video display code is run)
def display_episode(self,env):
wrapped_env=wrap_env(env)
self.control_episode(wrapped_env,False,True)
wrapped_env.close()
show_video()
# This is a function used in showing videos
def show_video():
mp4list = glob.glob('video/*.mp4')
if len(mp4list) > 0:
mp4 = mp4list[0]
video = io.open(mp4, 'r+b').read()
encoded = base64.b64encode(video)
ipythondisplay.display(HTML(data='''<video alt="test" autoplay
loop controls style="height: 400px;">
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii'))))
else:
print("Could not find video")
# This is a function used when recording videos
def wrap_env(env):
env = Monitor(env, './video', force=True)
return env
dueling_dqn_brain=Dueling_DDQN_Brain(use_DDQN=False,hidden_size=24,learning_rate=.001,gamma=.99,batch_size=12,epsilon_decay_episodes=1000,epsilon_stop=.01)
dueling_dqn_brain.display_episode(env_long)
dqn_R=dueling_dqn_brain.control(env,5000,200,True,50)
dueling_dqn_brain.display_episode(env_long)
dueling_ddqn_brain=Dueling_DDQN_Brain(hidden_size=24,learning_rate=.001,gamma=.99,batch_size=12,epsilon_decay_episodes=1000,epsilon_stop=.01)
ddqn_R=dueling_ddqn_brain.control(env,5000,200,True,20)
dueling_ddqn_brain.display_episode(env_long)
display.stop()
| 0.740737 | 0.967717 |
# Publications markdown generator for academicpages
Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](https://academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `publications.py`. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one containing your data.
TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
## Data format
The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, url_slug, and paper_url, with a header at the top.
- `excerpt` and `paper_url` can be blank, but the others must have values.
- `pub_date` must be formatted as YYYY-MM-DD.
- `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
This is how the raw file looks (it doesn't look pretty, use a spreadsheet or other program to edit and create).
```
!cat publications.tsv
```
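If you are building the file from scratch, a small sketch like the following writes a one-row TSV with the expected columns; all of the values are made-up placeholders, and it writes to a separate file so your real `publications.tsv` is not overwritten:

```
import pandas as pd

example_row = {
    'pub_date': '2020-01-01',
    'title': 'An Example Paper',
    'venue': 'Example Conference',
    'excerpt': 'One-line summary of the paper.',
    'citation': 'Author, A. (2020). "An Example Paper." Example Conference.',
    'url_slug': 'example-paper',
    'paper_url': 'http://example.com/paper.pdf'
}
pd.DataFrame([example_row]).to_csv('publications_example.tsv', sep='\t', index=False)
```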
## Import pandas
We are using the very handy pandas library for dataframes.
```
import pandas as pd
```
## Import TSV
Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
```
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
```
## Escape special characters
YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
```
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
```
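A quick usage check of the helper (the input string is just an example):

```
# Ampersands and both quote styles are replaced with their HTML entities
html_escape('Rock & Roll: "quotes" and \'apostrophes\'')
```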
## Creating the markdown files
This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
```
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
if len(str(item.paper_url)) > 5:
md += "\n[Download paper here](" + item.paper_url + ")\n"
#md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
```
These files are in the publications directory, one directory below where we're working from.
```
!ls ../_publications/
!cat ../_publications/2009-10-01-paper-title-number-1.md
```
|
github_jupyter
|
!cat publications.tsv
import pandas as pd
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
if len(str(item.paper_url)) > 5:
md += "\n[Download paper here](" + item.paper_url + ")\n"
#md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
!ls ../_publications/
!cat ../_publications/2009-10-01-paper-title-number-1.md
| 0.379608 | 0.748053 |
# Task 2
```
# Imports
import re
import string
from datetime import datetime
from collections import defaultdict, Counter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.nn import Module
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from nltk.corpus import stopwords
device = 'cuda'
import random
random.seed(26)
np.random.seed(62)
torch.manual_seed(2021)
LANGUAGE = 'hi'
embedding_path = 'save/embedding_weights_hi_30_epoch_100_dim_10_wsize.pt'
embedding_size = 100
lstm_dim = 50
batch_size = 1
epochs = 3
```
## Load and preprocess data
```
train_data = pd.read_csv('data/hindi_hatespeech.tsv', sep='\t')
print('train:')
display(train_data.head())
train_sentences = train_data['text'].to_numpy()
train_labels = train_data['task_1'].to_numpy()
train_labels[train_labels=='NOT'] = 0
train_labels[train_labels=='HOF'] = 1
train_labels = train_labels.astype(int)
test_data = pd.read_csv('data/hasoc2019_hi_test_gold_2919.tsv', sep='\t')
print('test:')
display(test_data.head())
test_sentences = test_data['text'].to_numpy()
test_labels = test_data['task_1'].to_numpy()
test_labels[test_labels=='NOT'] = 0
test_labels[test_labels=='HOF'] = 1
test_labels = test_labels.astype(int)
def preprocess_texts(sentences):
# remove user taggings
user_tag_pattern = re.compile(r'\@\w*')
sentences = [re.sub(user_tag_pattern, ' ', sentence) for sentence in sentences]
# lower case
sentences = [sentence.lower() for sentence in sentences]
# remove punctuations
http_re = re.compile('http://[^ ]*')
https_re = re.compile('https://[^ ]*')
punctuation = string.punctuation[:2] + string.punctuation[3:]
translator = str.maketrans(punctuation, ' '*len(punctuation))
def clean(s):
s = re.sub(http_re, ' ', s)
s = re.sub(https_re, ' ', s)
s = s.translate(translator)
return s
sentences = [clean(sentence) for sentence in sentences]
# remove number ?
# remove stopwords
if LANGUAGE == 'hi':
stopwords = ['अंदर', 'अत', 'अदि', 'अप', 'अपना', 'अपनि', 'अपनी', 'अपने', 'अभि', 'अभी', 'आदि',
'आप', 'इंहिं', 'इंहें', 'इंहों', 'इतयादि', 'इत्यादि', 'इन', 'इनका', 'इन्हीं', 'इन्हें', 'इन्हों',
'इस', 'इसका', 'इसकि', 'इसकी', 'इसके', 'इसमें', 'इसि', 'इसी', 'इसे', 'उंहिं', 'उंहें',
'उंहों', 'उन', 'उनका', 'उनकि', 'उनकी', 'उनके', 'उनको', 'उन्हीं', 'उन्हें', 'उन्हों', 'उस',
'उसके', 'उसि', 'उसी', 'उसे', 'एक', 'एवं', 'एस', 'एसे', 'ऐसे', 'ओर', 'और', 'कइ',
'कई', 'कर', 'करता', 'करते', 'करना', 'करने', 'करें', 'कहते', 'कहा', 'का', 'काफि',
'काफ़ी', 'कि', 'किंहें', 'किंहों', 'कितना', 'किन्हें', 'किन्हों', 'किया', 'किर', 'किस',
'किसि', 'किसी', 'किसे', 'की', 'कुछ', 'कुल', 'के', 'को', 'कोइ', 'कोई', 'कोन',
'कोनसा', 'कौन', 'कौनसा', 'गया', 'घर', 'जब', 'जहाँ', 'जहां', 'जा', 'जिंहें', 'जिंहों',
'जितना', 'जिधर', 'जिन', 'जिन्हें', 'जिन्हों', 'जिस', 'जिसे', 'जीधर', 'जेसा', 'जेसे',
'जैसा', 'जैसे', 'जो', 'तक', 'तब', 'तरह', 'तिंहें', 'तिंहों', 'तिन', 'तिन्हें', 'तिन्हों',
'तिस', 'तिसे', 'तो', 'था', 'थि', 'थी', 'थे', 'दबारा', 'दवारा', 'दिया', 'दुसरा', 'दुसरे',
'दूसरे', 'दो', 'द्वारा', 'न', 'नहिं', 'नहीं', 'ना', 'निचे', 'निहायत', 'नीचे', 'ने', 'पर',
'पहले', 'पुरा', 'पूरा', 'पे', 'फिर', 'बनि', 'बनी', 'बहि', 'बही', 'बहुत', 'बाद', 'बाला',
'बिलकुल', 'भि', 'भितर', 'भी', 'भीतर', 'मगर', 'मानो', 'मे', 'में', 'यदि', 'यह', 'यहाँ',
'यहां', 'यहि', 'यही', 'या', 'यिह', 'ये', 'रखें', 'रवासा', 'रहा', 'रहे', 'ऱ्वासा', 'लिए',
'लिये', 'लेकिन', 'व', 'वगेरह', 'वरग', 'वर्ग', 'वह', 'वहाँ', 'वहां', 'वहिं', 'वहीं', 'वाले',
'वुह', 'वे', 'वग़ैरह', 'संग', 'सकता', 'सकते', 'सबसे', 'सभि', 'सभी', 'साथ', 'साबुत',
'साभ', 'सारा', 'से', 'सो', 'हि', 'ही', 'हुअ', 'हुआ', 'हुइ', 'हुई', 'हुए', 'हे', 'हें',
'है', 'हैं', 'हो', 'होता', 'होति', 'होती', 'होते', 'होना', 'होने']
elif LANGUAGE == 'en':
stopwords = stopwords.words('english')
sentences = [[word for word in sentence.split() if word not in stopwords] for sentence in sentences]
return sentences
train_sentences = preprocess_texts(train_sentences)
test_sentences = preprocess_texts(test_sentences)
# vocab_size and word->id and id->word
flattened_words = [word for sentence in train_sentences for word in sentence]
V = list(set(flattened_words))
vocab_size = len(V)
print(f'vocab_size: {vocab_size}')
word_to_int = {}
int_to_word = {}
for i, word in enumerate(V):
word_to_int[word] = i
int_to_word[i] = word
train_sentences = [[word_to_int[word] for word in sentence] for sentence in train_sentences]
test_sentences = [[word_to_int[word] for word in sentence if word in word_to_int] for sentence in test_sentences]
print('Number of empty test sentences: ', sum([len(s) == 0 for s in test_sentences]))
```
## Build datasets
```
class HOFDataset(Dataset):
def __init__(self, sentences, labels):
self.data = []
for sentence, label in zip(sentences, labels):
self.data.append(
(torch.tensor(sentence, dtype=torch.long),
torch.tensor(label, dtype=torch.long))
)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
train_dataset = HOFDataset(train_sentences, train_labels)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = HOFDataset(test_sentences, test_labels)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
```
## Network architecture
```
class Classifier(Module):
    def __init__(self, lstm_dim):
        super(Classifier, self).__init__()
        self.embed = nn.Embedding(vocab_size, embedding_size)
        self.embed.load_state_dict(torch.load(embedding_path))
        self.embed.weight.requires_grad = False  # freeze the pretrained embedding weights
        self.lstm = nn.LSTM(embedding_size, lstm_dim, batch_first=True)
        self.fc = nn.Linear(lstm_dim, 1)
    def forward(self, inp):
        out = self.embed(inp)          # (batch, seq_len, embedding_size)
        out, _ = self.lstm(out)        # (batch, seq_len, lstm_dim)
        out = self.fc(out[:, -1, :])   # classify from the last timestep
        return out.squeeze(1)          # (batch,) of logits, matching the labels
clf = Classifier(lstm_dim=lstm_dim).to(device)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(clf.parameters())
for epoch in range(1, epochs + 1):
losses = 0.
cnt = 0
clf.train()
for texts, labels in tqdm(train_loader):
optimizer.zero_grad()
pred = clf(texts.to(device))
        loss = criterion(pred, labels.float().to(device))  # BCEWithLogitsLoss expects float targets
loss.backward()
optimizer.step()
losses += loss.detach().item() * len(texts)
cnt += len(texts)
epoch_loss = losses / cnt
print(f'Epoch {epoch:2}: training loss: {epoch_loss:.4f} over {cnt} training points.')
len(train_sentences)
tr_len = np.array([len(s) for s in train_sentences])
sum(tr_len == 0)
train_sentences = train_sentences[tr_len != 0]
len(train_sentences)
type(train_sentences)
np.where(tr_len == 0)[0]
train_sentences[list(np.where(tr_len != 0)[0])]
z = torch.tensor(train_sentences)
ben_data = pd.read_csv('data/bengali_hatespeech.csv')
ben_data
ben_sentences = ben_data['sentence'].to_numpy()
ben_sentences
ben = preprocess_texts(ben_sentences)
ben_vocab = list(set(word for sentence in ben for word in sentence))
len(ben_vocab)
ben_data.describe()
```
|
github_jupyter
|
# Imports
import re
import string
from datetime import datetime
from collections import defaultdict, Counter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.nn import Module
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from nltk.corpus import stopwords
device = 'cuda'
import random
random.seed(26)
np.random.seed(62)
torch.manual_seed(2021)
LANGUAGE = 'hi'
embedding_path = 'save/embedding_weights_hi_30_epoch_100_dim_10_wsize.pt'
embedding_size = 100
lstm_dim = 50
batch_size = 1
epochs = 3
train_data = pd.read_csv('data/hindi_hatespeech.tsv', sep='\t')
print('train:')
display(train_data.head())
train_sentences = train_data['text'].to_numpy()
train_labels = train_data['task_1'].to_numpy()
train_labels[train_labels=='NOT'] = 0
train_labels[train_labels=='HOF'] = 1
train_labels = train_labels.astype(int)
test_data = pd.read_csv('data/hasoc2019_hi_test_gold_2919.tsv', sep='\t')
print('test:')
display(test_data.head())
test_sentences = test_data['text'].to_numpy()
test_labels = test_data['task_1'].to_numpy()
test_labels[test_labels=='NOT'] = 0
test_labels[test_labels=='HOF'] = 1
test_labels = test_labels.astype(int)
def preprocess_texts(sentences):
# remove user taggings
user_tag_pattern = re.compile(r'\@\w*')
sentences = [re.sub(user_tag_pattern, ' ', sentence) for sentence in sentences]
# lower case
sentences = [sentence.lower() for sentence in sentences]
# remove punctuations
http_re = re.compile('http://[^ ]*')
https_re = re.compile('https://[^ ]*')
punctuation = string.punctuation[:2] + string.punctuation[3:]
translator = str.maketrans(punctuation, ' '*len(punctuation))
def clean(s):
s = re.sub(http_re, ' ', s)
s = re.sub(https_re, ' ', s)
s = s.translate(translator)
return s
sentences = [clean(sentence) for sentence in sentences]
# remove number ?
# remove stopwords
if LANGUAGE == 'hi':
stopwords = ['अंदर', 'अत', 'अदि', 'अप', 'अपना', 'अपनि', 'अपनी', 'अपने', 'अभि', 'अभी', 'आदि',
'आप', 'इंहिं', 'इंहें', 'इंहों', 'इतयादि', 'इत्यादि', 'इन', 'इनका', 'इन्हीं', 'इन्हें', 'इन्हों',
'इस', 'इसका', 'इसकि', 'इसकी', 'इसके', 'इसमें', 'इसि', 'इसी', 'इसे', 'उंहिं', 'उंहें',
'उंहों', 'उन', 'उनका', 'उनकि', 'उनकी', 'उनके', 'उनको', 'उन्हीं', 'उन्हें', 'उन्हों', 'उस',
'उसके', 'उसि', 'उसी', 'उसे', 'एक', 'एवं', 'एस', 'एसे', 'ऐसे', 'ओर', 'और', 'कइ',
'कई', 'कर', 'करता', 'करते', 'करना', 'करने', 'करें', 'कहते', 'कहा', 'का', 'काफि',
'काफ़ी', 'कि', 'किंहें', 'किंहों', 'कितना', 'किन्हें', 'किन्हों', 'किया', 'किर', 'किस',
'किसि', 'किसी', 'किसे', 'की', 'कुछ', 'कुल', 'के', 'को', 'कोइ', 'कोई', 'कोन',
'कोनसा', 'कौन', 'कौनसा', 'गया', 'घर', 'जब', 'जहाँ', 'जहां', 'जा', 'जिंहें', 'जिंहों',
'जितना', 'जिधर', 'जिन', 'जिन्हें', 'जिन्हों', 'जिस', 'जिसे', 'जीधर', 'जेसा', 'जेसे',
'जैसा', 'जैसे', 'जो', 'तक', 'तब', 'तरह', 'तिंहें', 'तिंहों', 'तिन', 'तिन्हें', 'तिन्हों',
'तिस', 'तिसे', 'तो', 'था', 'थि', 'थी', 'थे', 'दबारा', 'दवारा', 'दिया', 'दुसरा', 'दुसरे',
'दूसरे', 'दो', 'द्वारा', 'न', 'नहिं', 'नहीं', 'ना', 'निचे', 'निहायत', 'नीचे', 'ने', 'पर',
'पहले', 'पुरा', 'पूरा', 'पे', 'फिर', 'बनि', 'बनी', 'बहि', 'बही', 'बहुत', 'बाद', 'बाला',
'बिलकुल', 'भि', 'भितर', 'भी', 'भीतर', 'मगर', 'मानो', 'मे', 'में', 'यदि', 'यह', 'यहाँ',
'यहां', 'यहि', 'यही', 'या', 'यिह', 'ये', 'रखें', 'रवासा', 'रहा', 'रहे', 'ऱ्वासा', 'लिए',
'लिये', 'लेकिन', 'व', 'वगेरह', 'वरग', 'वर्ग', 'वह', 'वहाँ', 'वहां', 'वहिं', 'वहीं', 'वाले',
'वुह', 'वे', 'वग़ैरह', 'संग', 'सकता', 'सकते', 'सबसे', 'सभि', 'सभी', 'साथ', 'साबुत',
'साभ', 'सारा', 'से', 'सो', 'हि', 'ही', 'हुअ', 'हुआ', 'हुइ', 'हुई', 'हुए', 'हे', 'हें',
'है', 'हैं', 'हो', 'होता', 'होति', 'होती', 'होते', 'होना', 'होने']
elif LANGUAGE == 'en':
stopwords = stopwords.words('english')
sentences = [[word for word in sentence.split() if word not in stopwords] for sentence in sentences]
return sentences
train_sentences = preprocess_texts(train_sentences)
test_sentences = preprocess_texts(test_sentences)
# vocab_size and word->id and id->word
flattened_words = [word for sentence in train_sentences for word in sentence]
V = list(set(flattened_words))
vocab_size = len(V)
print(f'vocab_size: {vocab_size}')
word_to_int = {}
int_to_word = {}
for i, word in enumerate(V):
word_to_int[word] = i
int_to_word[i] = word
train_sentences = [[word_to_int[word] for word in sentence] for sentence in train_sentences]
test_sentences = [[word_to_int[word] for word in sentence if word in word_to_int] for sentence in test_sentences]
print('Number of empty test sentences: ', sum([len(s) == 0 for s in test_sentences]))
class HOFDataset(Dataset):
def __init__(self, sentences, labels):
self.data = []
for sentence, label in zip(sentences, labels):
self.data.append(
(torch.tensor(sentence, dtype=torch.long),
torch.tensor(label, dtype=torch.long))
)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
train_dataset = HOFDataset(train_sentences, train_labels)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = HOFDataset(test_sentences, test_labels)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
class Classifier(Module):
def __init__(self, lstm_dim):
super(Classifier, self).__init__()
self.embed = nn.Embedding(vocab_size, embedding_size)
self.embed.load_state_dict(torch.load(embedding_path))
self.embed.requires_grad = False
self.lstm = nn.LSTM(embedding_size, lstm_dim)
self.fc = nn.Linear(lstm_dim, 1)
def forward(self, inp):
out = self.embed(inp)
out, _ = self.lstm(out)
out = self.fc(out)
return out
clf = Classifier(lstm_dim=lstm_dim).to(device)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(clf.parameters())
for epoch in range(1, epochs + 1):
losses = 0.
cnt = 0
clf.train()
for texts, labels in tqdm(train_loader):
optimizer.zero_grad()
pred = clf(texts.to(device))
loss = criterion(pred, labels.to(device))
loss.backward()
optimizer.step()
losses += loss.detach().item() * len(texts)
cnt += len(texts)
epoch_loss = losses / cnt
print(f'Epoch {epoch:2}: training loss: {epoch_loss:.4f} over {cnt} training points.')
len(train_sentences)
tr_len = np.array([len(s) for s in train_sentences])
sum(tr_len == 0)
train_sentences = train_sentences[tr_len != 0]
len(train_sentences)
type(train_sentences)
np.where(tr_len == 0)[0]
train_sentences[list(np.where(tr_len != 0)[0])]
z = torch.tensor(train_sentences)
ben_data = pd.read_csv('data/bengali_hatespeech.csv')
ben_data
ben_sentences = ben_data['sentence'].to_numpy()
ben_sentences
ben = preprocess_texts(ben_sentences)
ben_vocab = list(set(word for sentence in ben for word in sentence))
len(ben_vocab)
ben_data.describe()
| 0.398875 | 0.575021 |

```
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import style
import seaborn as sns
%matplotlib inline
data = pd.read_csv('titanic_data.csv')
heatmap_data = pd.read_csv('titanic_data.csv')
```
<h1 style="font-family:verdana; color:blue">Exploratory Data Analysis (EDA)</h1>
```
data.head()
data.shape
data.columns
```
- sibsp: # of siblings / spouses aboard the Titanic
- parch: # of parents / children aboard the Titanic
- ticket: Ticket number
- fare: Passenger fare
- cabin: Cabin number
- embarked: Port of Embarkation
```
data.info()
data.describe()
fig,ax = plt.subplots(figsize=(12,10))
cmap = sns.diverging_palette( 220 , 10 , as_cmap = True )
ax = sns.heatmap(heatmap_data.corr(), vmin= -1, vmax= 1, annot=True, cmap=cmap )
```
What data is actually missing?
```
missing_count = data.isnull().sum().sort_values(ascending=False)
percentage = (missing_count / data.shape[0]) *100
percentage = round(percentage,1)
missing_df = pd.concat([missing_count, percentage], keys = ['Total Missing', '%'], axis=1)
print(missing_df.head(3))
```
- `Embarked` feature has only 2 missing values, which can easily be filled.
- `Age` has 177 missing values, which are 19% of the whole dataset, so it can be filled but is trickier than `Embarked`.
- 77% of the `Cabin` values are missing so we might drop it.
Deleting features that are not important or do not contribute to the survival of passengers.
```
data.columns
data.hist(bins=10, figsize=(20,15))
plt.show()
df = data
df['Sex'].value_counts()
df['Pclass'].value_counts()
df['Sex'].unique()
```
## Age, Sex and Survival
```
FacetGrid = sns.FacetGrid(df, hue='Survived', aspect=4)
FacetGrid.map(sns.kdeplot, 'Age', shade=True)
FacetGrid.set(xlim=(0, df['Age'].max()))
FacetGrid.add_legend()
survived = 'Survived'
not_survived = 'Not Survived'
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
for i,gen in enumerate(df['Sex'].unique()):
print(gen)
gender = df[df['Sex'] == gen]
print("Number of ", gen, ": ", gender.shape[0])
gender_survived = gender[gender['Survived'] == 1]
gender_not_survived = gender[gender['Survived'] == 0]
print("Number of ", gen, "Survived: ", gender_survived.shape[0])
survival_percentage = (gender_survived.shape[0] / gender.shape[0])*100
print("Percantage of ", gen, "survived: ", "%.2f" % survival_percentage, "%")
print("\n ====== \n")
#plot
ax = sns.distplot(gender_survived.Age.dropna(), bins= 18, label= survived, ax= axes[i], kde=False)
ax = sns.distplot(gender_survived.Age.dropna(), bins=40, label= not_survived, ax= axes[i], kde=False)
ax.legend()
ax.set_title(gen)
plt.show()
```
`Age` vs. `Sex` Survival Observation
===========================================
**Male:**
Males between 18 and 30 years old are most likely to survive.
**Female:**
Females between 15 and 40 are most likely to survive.
**Infants:**
Infants also have higher probability of survival.
## Embarked, Pclass and Sex:
```
FacetGrid = sns.FacetGrid(df, row='Embarked', size= 4.5, aspect= 1.6)
FacetGrid.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', palette="husl", order=None, hue_order=None)
FacetGrid.add_legend()
```
### Embarked, Pclass and Sex **Observation**:
**Female:**
Females are more likely to survive on ports Q and S, while they have less chance of survival on port C.
**Male:**
Males have a higher chance of survival on port C and less chance of survival on ports Q and S.
_**Note:**_ It seems like the `Pclass` is also correlated with survival rate so we need to investigate more on that.
## Pclass vs. Survival
```
for cls in df['Pclass'].unique():
print("class: ", cls)
cls_passengers = df[df['Pclass'] == cls]
print("Number of passengers in class", cls, ": ", cls_passengers.shape[0])
cls_survived = cls_passengers[cls_passengers['Survived'] == 1]
print("Number of passengers survived in class ", cls, ":", cls_survived.shape[0])
cls_survival_percentage = (cls_survived.shape[0] / cls_passengers.shape[0])*100
print("Percantage of passengers survived in class ",cls, ": ", "%.2f" % cls_survival_percentage, "%")
print("\n ====== \n")
sns.barplot(data=df, x='Pclass', y='Survived', palette='coolwarm')
fig, ax = plt.subplots(figsize=(15,9))
sns.violinplot(x="Pclass", y="Age", hue='Survived', data=df, split=True, bw=0.05 , palette='husl', ax=ax)
plt.title('Survivals for Age and Pclass ')
plt.show()
g = sns.factorplot(x="Pclass", y="Age", hue="Survived", col="Sex", data=df, kind="swarm", dodge=True, palette='husl', size=8, aspect=.9, s=8)
```
## Pclass vs. Survival Observation
A person in Pclass 1 has a high probability of surviving, while the inverse is true for Pclass 3, which has a high probability of not surviving.
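The same per-class survival rates can also be read off in one line with a groupby; this is just a compact sketch using the columns already in `df`:

```
# Survival rate per passenger class, highest first
df.groupby('Pclass')['Survived'].mean().sort_values(ascending=False)
```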
## SibSp and Parch:
```
for dataset in [df]:
dataset['Relatives'] = dataset['SibSp'] + dataset['Parch']
dataset.loc[dataset['Relatives'] > 0, 'Alone'] = 0
dataset.loc[dataset['Relatives'] == 0, 'Alone'] =1
dataset['Alone'] = dataset['Alone'].astype(int)
pd.crosstab(df.Relatives,df.Survived).apply(lambda r: r/r.sum(), axis=1).style.background_gradient(cmap='summer_r')
sns.factorplot('Relatives', 'Survived', data = df, aspect=2.5)
```
## SibSp and Parch Observation:
If a person has between 1 and 3 relatives then they have a higher probability of survival, while having fewer than 1 or more than 3 relatives makes the chance of survival very low, except for some cases with 6 relatives.
=========================================================================================
=========================================================================================
<h1 style="font-family:verdana; color:blue"> Feature Engineering - DATA WRANGLING </h1>
1) Dropping Features that may not contribute to survival
- `Ticket` feature may be dropped from our analysis as it contains a high ratio of duplicates (22%) and there may not be a correlation between Ticket and survival.
- `Cabin` feature may be dropped as it is highly incomplete or contains many null values.
- `PassengerId` may be dropped from training dataset as it does not contribute to survival.
```
print('No. of columns before dropping: ', df.shape[1])
df.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True)
print('No. of columns After dropping: ', df.shape[1])
```
2) `Name` feature: Names may not correlate with survival, so we want to drop this feature. But first, what if the titles within the names themselves correlate with survival?
We need to investigate that before dropping the Name feature, so we are going to replace names with their titles first, using a regular expression that matches the word ending with a dot character within the Name feature.
```
for dataset in [df]:
dataset['Title'] = dataset['Name'].str.extract(' ([A-Za-z]+)\.', expand=False)
pd.crosstab(df['Sex'], df['Title']).style.background_gradient(cmap='summer_r')
```
Replacing rarely used titles with 'Rare' and replacing other titles with a more common one.
```
for dataset in [df]:
dataset['Title']= dataset['Title'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Jonkheer',\
'Lady', 'Major', 'Rev', 'Sir'],'Rare')
dataset['Title']= dataset['Title'].replace(['Mlle', 'Ms'], 'Miss')
dataset['Title']= dataset['Title'].replace('Mme', 'Mrs')
df[['Title', 'Survived']]. groupby(['Title'], as_index=False).mean().sort_values(by='Survived', ascending=False)
```
Convert categorical data to ordinal
```
Title_mapping={"Mrs":1, "Miss":2, "Mr":3, "Master":4, "Rare":5}
for dataset in [df]:
dataset['Title'] = dataset['Title'].map(Title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
df.head()
```
Now, after extracting Titles from Names we can safely drop the `Name` feature.
```
df.drop(['Name'], axis=1, inplace=True)
```
## Converting String values into numerical.
Most models need all values to be numerical in order to perform better.
3) Converting categorical `Sex` values into numerical discrete categories.
```
for dataset in [df]:
dataset['Sex'] = dataset['Sex']. map({"male":0, "female":1}).astype(int)
df.head(2)
```
# Completing missing values
### Completing Age values.
```
grid = sns.FacetGrid(df, row='Pclass', col='Sex', size=2.2, aspect=1.6)
grid.map(plt.hist, 'Age', alpha=.5, bins=20)
grid.add_legend()
```
The `Age` feature is a continuous feature.
The method that we will use for filling missing `Age` values is:
> An accurate way of guessing missing values is to use other correlated features. In our case we note correlation among Age, Gender, and Pclass. Guess Age values using median values for Age across sets of Pclass and Gender feature combinations. So, median Age for Pclass=1 and Gender=0, Pclass=1 and Gender=1, and so on...
```
df['Age'] = df.groupby(['Survived','Pclass'])['Age'].apply(lambda x: x.fillna(x.median()))
df['Age'] = df['Age'].astype(int)
```
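Note that the cell above takes the median per `Survived`/`Pclass` group. A sketch of the `Pclass`-and-`Sex` median strategy described in the quote, applied to a fresh copy of the raw data so nothing above is overwritten, could look like this:

```
# Hypothetical alternative: impute the median Age per (Pclass, Sex) combination
raw = pd.read_csv('titanic_data.csv')
raw['Age'] = raw.groupby(['Pclass', 'Sex'])['Age'].transform(lambda s: s.fillna(s.median()))
raw['Age'].isnull().sum()
```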
### Converting Age into ordinal by making Age categories
```
df['Age_Cat'] = df['Age']
df.head(3)
def age_to_cat(age):
if age <4:
return 0 #baby
elif age <10:
return 1 #child
elif age <21:
return 2 #teen
elif age <33:
return 3 #young adult
elif age <50: #adult
return 4
return 5 #elder
age_cat = {
0: "baby",
1: "child",
2: "teen",
3: "yound adult",
4: "adult",
5: "elder"
}
df['Age_Cat'] = df['Age_Cat'].apply(age_to_cat)
df.head(3)
df['Age_Cat'].hist(bins = 20, figsize=(10,7), alpha=.5)
plt.show()
```
### Completing Embarked values.
Since the `Embarked` feature has only 2 missing values, we will simply fill these with the most common one.
```
df['Embarked'].describe()
df['Embarked'] = df['Embarked'].fillna('S')
```
# Converting Embarked categorical feature to numeric
```
for dataset in [df]:
dataset['Embarked'] = dataset['Embarked'].map({"S":0, "C":1, "Q":2})
df.head()
```
# Converting Fare from float to int
```
df['Fare'] = df['Fare'].astype(int)
```
# Converting Fare to ordinal categories
```
df['FareBand'] = pd.qcut(df['Fare'], 4)
df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
for dataset in [df]:
dataset.loc[ dataset['Fare'] <= 7.0, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.0) & (dataset['Fare'] <= 14.0), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.0) & (dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
df = df.drop(['FareBand'], axis=1)
df.head()
```
=========================================================================================
=========================================================================================
<h1 style="font-family:verdana; color:blue"> Model Prediction</h1>
Every Machine Learning Model consists of three parts which we will walk through:
- Representation
- Evaluation
- Optimization
First we need to identify the type of the problem, which is a **_supervised_** learning problem because we already have the labeled target variable, and a **_classification_** problem because our dependent variable consists of two categories.
## splitting data
```
from sklearn.model_selection import train_test_split
X = df.drop(['Survived'], axis=1)
y = df['Survived']
X_train,X_test,y_train,y_test = train_test_split(X, y, test_size=.3, random_state=42)
```
# Representation
Our set of hypotheses will contain a set of algorithms; we will evaluate them all and optimize the best one later on.
The algorithms which I will train the model on are:
- Logistic Regression
- Random Forest
- KNN
- SVM
- Decision Trees
```
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier as RandomForest
from sklearn.neighbors import KNeighborsClassifier as KNeighbors
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier as DecisionTree
#Logistic Regression
logReg = LogisticRegression()
logReg.fit(X_train, y_train)
y_pred = logReg.predict(X_test)
acc_logReg = round(logReg.score(X_train, y_train) * 100, 2)
acc_logReg
#Random Forest
rf = RandomForest(n_estimators=100)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
acc_rf = round(rf.score(X_train, y_train) * 100, 2)
acc_rf
#KNN
knn = KNeighbors(n_neighbors=3)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
acc_knn = round(knn.score(X_train, y_train) * 100, 2)
acc_knn
#SVM
svc = SVC()
svc.fit(X_train, y_train)
y_pred = svc.predict(X_test)
acc_svc = round(svc.score(X_train, y_train) * 100, 2)
acc_svc
# Decision Tree
dt = DecisionTree()
dt.fit(X_train, y_train)
y_pred = dt.predict(X_test)
acc_dt = round(dt.score(X_train, y_train) * 100, 2)
acc_dt
```
# Evaluation
We can now rank our evaluation of all the models to choose the best one for our problem. While the Decision Tree and Random Forest score the same here, we choose Random Forest because it corrects for a single decision tree's habit of overfitting to its training set.
```
models = pd.DataFrame({
'Model': ['Logistic Regression', 'Random Forest', 'KNN', 'SVC', 'Decision Tree'],\
'Score': [acc_logReg, acc_rf, acc_knn, acc_svc, acc_dt]})
models.sort_values(by='Score', ascending=False)
```
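The scores above are training-set accuracies, which flatter models that memorize the training data (the Decision Tree in particular). As a rough sanity check, a hedged sketch that cross-validates the Random Forest on the training set could look like this (using `cross_val_score`, which is also imported in the next cell):
```
from sklearn.model_selection import cross_val_score

# 5-fold cross-validated accuracy on the training set, less optimistic than the raw training score
rf_cv = RandomForest(n_estimators=100, random_state=42)
cv_scores = cross_val_score(rf_cv, X_train, y_train, cv=5, scoring='accuracy')
print("CV accuracy: %.2f%% +/- %.2f%%" % (cv_scores.mean() * 100, cv_scores.std() * 100))
```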
# Optimization - Hyper Parameter tuning
```
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.metrics import fbeta_score, make_scorer
clf = RandomForest(random_state=42)
param_grid = {"n_estimators": [10,100, 1000, 1500],\
"min_samples_leaf" : [1, 5, 10],\
"min_samples_split" : [2, 4, 10],\
"max_depth":[10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],
"criterion" : ["gini", "entropy"]}
scorer = make_scorer(fbeta_score, beta=0.5)
grid_obj = GridSearchCV(clf, param_grid=param_grid, cv=10, scoring=scorer)
grid_fit = grid_obj.fit(X_train, y_train)
# Get the estimator
best_clf = grid_fit.best_estimator_
grid_y_pred = grid_obj.predict(X_test)
grid_score = round(grid_obj.score(X_train, y_train) * 100, 2)
print("Best Estimators: ", best_clf)
print("GridSearchCV score: ", grid_score)
```
# Some More Evaluations
## Classification Report
```
from sklearn.metrics import classification_report
print (classification_report(y_test, grid_y_pred))
```
- **Precision:** of the passengers the model predicts as survivors, 87% actually survived.
- **Recall:** the model identifies 69% of the passengers who actually survived.
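To make these two numbers concrete, a small sketch recomputes them from the confusion-matrix counts (TP, FP, FN) and checks them against scikit-learn:
```
from sklearn.metrics import confusion_matrix, precision_score, recall_score

# Unpack the binary confusion matrix: rows = actual, columns = predicted
tn, fp, fn, tp = confusion_matrix(y_test, grid_y_pred).ravel()
print("precision = TP/(TP+FP) =", tp / (tp + fp))
print("recall    = TP/(TP+FN) =", tp / (tp + fn))
# Should agree with the library implementations
print(precision_score(y_test, grid_y_pred), recall_score(y_test, grid_y_pred))
```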
## Confusion Matrix
```
from sklearn.metrics import confusion_matrix
confusion_matrix = confusion_matrix(y_test, grid_y_pred)
sns.heatmap(confusion_matrix, annot=True, fmt="d")
```
# ROC AUC Curve
```
from sklearn.metrics import roc_curve
false_positive_rate, true_positive_rate, threshold = roc_curve(y_test, grid_y_pred)
# plotting them against each other
def plot_roc_curve(false_positive_rate, true_positive_rate, label=None):
plt.plot(false_positive_rate, true_positive_rate, linewidth=2, label=label)
plt.plot([0, 1], [0, 1], 'r', linewidth=4)
plt.axis([0, 1, 0, 1])
plt.xlabel('False Positive Rate (FPR)', fontsize=16)
plt.ylabel('True Positive Rate (TPR)', fontsize=16)
plt.figure(figsize=(14, 7))
plot_roc_curve(false_positive_rate, true_positive_rate)
plt.show()
from sklearn.metrics import roc_auc_score
roc_auc_score = roc_auc_score(y_test, grid_y_pred)
print("Roc score: ", round(roc_auc_score,2)*100, "%")
```
# MULTI-LAYER PERCEPTRON ON MNIST
```
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
%matplotlib inline
print ("PACKAGES LOADED")
```
# LOAD MNIST
```
mnist = input_data.read_data_sets('data/', one_hot=True)
```
# DEFINE MODEL
```
# NETWORK TOPOLOGIES
n_hidden_1 = 256
n_hidden_2 = 128
n_input = 784
n_classes = 10
# INPUTS AND OUTPUTS
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# NETWORK PARAMETERS
stddev = 0.1
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=stddev)),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=stddev)),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], stddev=stddev))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
print ("NETWORK READY")
```
# MLP AS A FUNCTION
```
def multilayer_perceptron(_X, _weights, _biases):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2']))
return (tf.matmul(layer_2, _weights['out']) + _biases['out'])
```
# DEFINE FUNCTIONS
```
# PREDICTION
pred = multilayer_perceptron(x, weights, biases)
## pred has shape (num_samples, n_classes); the input is (n, 784) with each sample flattened into one row, so [0, :] refers to the first sample
# LOSS AND OPTIMIZER
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) ## softmax cross-entropy, TensorFlow's standard multi-class loss
# optm = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
optm = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost) ## Adam optimizer (adaptive moment estimation)
corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accr = tf.reduce_mean(tf.cast(corr, "float")) ## fraction of correct predictions, i.e. the accuracy
# INITIALIZER
init = tf.initialize_all_variables()
print ("FUNCTIONS READY")
```
# RUN
```
# PARAMETERS
training_epochs = 20
batch_size = 100
display_step = 4
# LAUNCH THE GRAPH
sess = tf.Session()
sess.run(init)
# OPTIMIZE
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# ITERATION
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feeds = {x: batch_xs, y: batch_ys}
sess.run(optm, feed_dict=feeds)
avg_cost += sess.run(cost, feed_dict=feeds)
avg_cost = avg_cost / total_batch
# DISPLAY
if (epoch+1) % display_step == 0:
print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
feeds = {x: batch_xs, y: batch_ys}
train_acc = sess.run(accr, feed_dict=feeds)
print ("TRAIN ACCURACY: %.3f" % (train_acc))
feeds = {x: mnist.test.images, y: mnist.test.labels}
test_acc = sess.run(accr, feed_dict=feeds)
print ("TEST ACCURACY: %.3f" % (test_acc))
print ("OPTIMIZATION FINISHED")
```
# eurostat death data weekly
* https://appsso.eurostat.ec.europa.eu/nui/show.do?dataset=demo_r_mweek3&lang=en
* https://ec.europa.eu/eurostat/statistics-explained/index.php?title=Weekly_death_statistics
see also
https://www.euromomo.eu/graphs-and-maps
```
import pandas as pd
import pylab as plt
import numpy as np
import seaborn as sns
```
## read raw data
```
raw = pd.read_csv('demo_r_mweek3_1_Data_byage.csv', thousands=',', parse_dates=['TIME'])
raw['YEAR'] = raw.TIME.str[:4].astype(int)
raw['WEEK'] = raw.TIME.str[5:].astype(int)
raw['Value'] = raw.Value.astype('float')
oldest = raw.AGE.unique()[14:19]
old = raw.AGE.unique()[12:14]
middle = raw.AGE.unique()[10:12]
young = raw.AGE.unique()[:10]
def agegroup(x):
if x in oldest:
return '>70'
elif x in old:
return '60-69'
elif x in middle:
return '50-59'
else:
return '<50'
raw['GROUP'] = raw.AGE.apply(agegroup)
raw.dtypes
```
## aggregate the raw data by age and sex to get totals
https://stackoverflow.com/questions/45436873/pandas-how-to-create-a-datetime-object-from-week-and-year
```
df=raw.groupby(['TIME','YEAR','WEEK','GEO']).agg({'Value':np.sum}).reset_index().replace(0,np.nan)
#create the date column
df['DATE'] = pd.to_datetime(df.YEAR.astype(str), format='%Y') + \
pd.to_timedelta(df.WEEK.mul(7).astype(str) + ' days')
df['MONTH'] = df.DATE.dt.month
df
p = pd.pivot_table(df,index='DATE',columns='GEO',values='Value')
#x.columns = x.columns.get_level_values(1)
print (p.columns)
p[-3:]
countries1 = ['France','Sweden','Denmark']
countries2 = ['France','Italy','Spain','Portugal','Austria','Denmark','Sweden','Norway']
all = ['Belgium','Switzerland','Sweden','Spain','Austria','Luxembourg',
       'Finland','Portugal','Slovenia','Slovakia','Norway','Lithuania','Estonia','Czechia','Latvia']
```
## monthly
```
x = df[df.GEO.isin(countries1)]
x.groupby('MONTH').agg({'Value':np.sum})
```
## trend
```
x = df[df.GEO.isin(countries1)]
g=sns.relplot(x='DATE',y='Value',data=x,kind='line',aspect=3,height=4,hue='GEO',estimator=np.sum,ci=None)
g.savefig('eurostat_flu_cycle.png')
```
## Plot totals up to week 50 per year for a subset of countries using catplot
```
sub = df[df.WEEK<=50]
x = sub[sub.GEO.isin(countries1)]
g=sns.catplot(x='YEAR',y='Value',data=x,kind='bar',aspect=3,hue='GEO',estimator=np.sum,ci=None)
g.fig.suptitle('total deaths up to June by year')
g.fig.savefig('eurostat_4countries_totaldeaths.png')
sns.set_context("talk")
x = sub[sub.GEO.isin(all)]
g=sns.catplot(x='YEAR',y='Value',data=x,kind='bar',aspect=1.5,col='GEO',col_wrap=4,height=4,sharey=False,estimator=np.sum,ci=None,color='lightblue')
for axes in g.axes.flat:
axes.set_xticklabels(axes.get_xticklabels(), rotation=65, horizontalalignment='center')
g.fig.savefig('eurostat_totaldeaths_bycountry.png')
x = raw[raw.GEO=='Sweden']
g=sns.catplot(x='GEO',y='Value',data=x,kind='bar',aspect=2,hue='AGE',col='YEAR',col_wrap=2,
sharey=False,estimator=np.sum,ci=None,palette='Set2')
```
## compare given period by age group
```
x = raw[raw.GEO.isin(all)]
#x = x[x.YEAR>2006]
#x = x[(x.WEEK>20) & (x.WEEK<50)]
#remove france and italy which are incomplete for older years
x = x[~x.GEO.isin(countries2)]
g=sns.catplot(x='YEAR',y='Value',data=x,kind='bar',sharey=False,aspect=5,height=2.5,row='GROUP',estimator=np.sum,ci=None,color='darkblue')
g.fig.suptitle('Eurostat Total deaths by age group, (selected countries)')
plt.subplots_adjust(top=0.9)
g.fig.savefig('eurostat_fluseason_deaths.png')
```
## covid peak shown against the 2015-2019 weekly mean
```
f,ax=plt.subplots(4,2,figsize=(15,9))
axs=ax.flat
def plot_trend(x,ax):
mx = x[(x.YEAR<2020) & (x.YEAR>2014)]
sns.lineplot(x="WEEK", y="Value", data=mx,ax=ax,label='mean')
s = x[x.YEAR==2020]
sns.lineplot(x='WEEK',y='Value',data=s, color='red',ax=ax,label='2020')
s = x[x.YEAR==2021]
sns.lineplot(x='WEEK',y='Value',data=s, color='orange',ax=ax,label='2021')
ax.set_xlabel('')
ax.legend()
return
i=0
for c in countries2:
x = df[df.GEO==c]
g=plot_trend(x,ax=axs[i])
axs[i].set_title(c)
i+=1
plt.tight_layout()
f.savefig('eurostat_2020peak_trend.png')
```
## age breakdown
```
#print (oldest,old,middle,young)
cats = {'>70':oldest,'60-69':old,'50-59':middle,'<50':young}
f,ax=plt.subplots(4,1,figsize=(10,9))
axs=ax.flat
i=0
country='Austria'
for c in cats:
x = raw[(raw.GEO==country)]
x = x[x.AGE.isin(cats[c])]
x = x.groupby(['TIME','YEAR','WEEK','GEO']).agg({'Value':np.sum}).reset_index().replace(0,np.nan)
g=plot_trend(x,ax=axs[i])
axs[i].set_title(c)
i+=1
plt.tight_layout()
f.suptitle('Deaths trend')
plt.subplots_adjust(top=0.9)
f.savefig('eurostat_2020peak_trend_byage_sweden.jpg')
```
# HOME ASSIGNMENT #3: SLACK API - TO GSHEET
**Purpose of this assignment**
- Get the list of Users from the DataCracy Slack workspace (organizers (BTC), Mentors and Learners)
- `**[Optional 1]**` Push the list of Users to a Google Spreadsheet for tracking
- `**[Optional 2]**` Get the assignment submissions and the number of reviews in `#atom-assignment2` and update the Spreadsheet, to track which learners have submitted and been reviewed
**Skills applied**
- Review and further practice the API concept (specifically the Slack API)
- Extract information from JSON
- Use the gspread module to push information to a Google Spreadsheet
## 0. Load Modules
```
import requests #-> To call the APIs
import re #-> To process string data
from datetime import datetime as dt #-> To process datetime data
import gspread #-> To push data to Google Spreadsheet
from gspread_dataframe import set_with_dataframe #-> To push data to Google Spreadsheet
import pandas as pd #-> To work with tabular data
import json
from oauth2client.service_account import ServiceAccountCredentials #-> To load the Google Spreadsheet credentials
import os
```
## 1. Slack API: User List
* You can re-read the API concept [HERE](https://anhdang.gitbook.io/datacracy/atom/3-data-tools-2/3.2-spotify-api-and-postman)
* This assignment uses the Slack API to get information about the Learners and to track which assignments have been submitted and reviewed (and then updates a Google Spreadsheet)
* ===> **NOTICE**: the Slack API authorizes with a Bearer Token `xoxb-...-...-...` (provided separately)
* Update the `env_variable.json` file as in [Assignment#2](../assignment_2/home_assignment_2.ipynb)
* ==> If you use Google Colab, upload the file to Colab ([Guide](https://colab.research.google.com/notebooks/io.ipynb))
```
!dir
with open('..\\assignment_3\env_variable.json', 'r') as j:
json_data = json.load(j)
## Load SLACK_BEARER_TOKEN
os.environ['SLACK_BEARER_TOKEN'] = json_data['SLACK_BEARER_TOKEN']
## Call the API endpoint (input: the token is passed in the request headers)
## Challenge: Try calling this API with Postman
endpoint = "https://slack.com/api/users.list"
headers = {"Authorization": "Bearer {}".format(os.environ['SLACK_BEARER_TOKEN'])}
response_json = requests.post(endpoint, headers=headers).json()
user_data = response_json['members']
```
### TODO #1
Complete the following code
```
## Loop over the JSON and extract the important fields (id, name, display_name, real_name_normalized, title, phone, is_bot)
## Hint: You can use Postman or print the user_data JSON to inspect its structure (schema); use Ctrl+F to find the keys (id, name, display_name, real_name_normalized, title, phone, is_bot)
user_dict = {'user_id':[], 'name':[], 'display_name':[],'real_name':[],'title':[],'phone':[],'is_bot':[]}
for user in user_data:
user_dict['user_id'].append(user['id'])
user_dict['name'].append(user['name'])
user_dict['display_name'].append(user['profile']['display_name'])
user_dict['real_name'].append(user['profile']['real_name_normalized'])
user_dict['title'].append(user['profile']['title'])
user_dict['phone'].append(user['profile']['phone'])
user_dict['is_bot'].append(user['is_bot'])
user_df = pd.DataFrame(user_dict) ## Use pandas to convert the dictionary into a table
user_df.head(5) ## Print only the first 5 rows (mainly to inspect the structure)
user_df[user_df.display_name == 'MAD'] ## Filter MAD's information on the DataFrame (you can Google for more)
```
-------------- END OF THE REQUIRED PART ---------------------
## Option 1: Update data => Google SpreadSheet
### TODO#2
Create a service account (the output is a JSON file); this file allows us to access the Google Spreadsheet:
1. Follow the guide: [Google Create a Service Account](https://support.google.com/a/answer/7378726?hl=en)

2. Save the JSON file (containing the credentials) to your machine

3. Remember to enable the [Google Drive API](https://console.cloud.google.com/marketplace/product/google/drive.googleapis.com?q=search&referrer=search&project=quickstart-313303) (if running the code raises an error saying the API has not been enabled, open the link in the error message to enable it; after enabling it may take a few minutes before it works)

* ==> Upload the Gsheet credential JSON file if you use Colab
* ==> If you keep the key in a git repo, **REMEMBER** to add the json file to `.gitignore` so the key does not get leaked
```
!dir
## Authorize using the JSON key file
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'fifth-dynamics-314309-366d9d5b8063.json', scope)
gc = gspread.authorize(credentials)
print("DONE!")
```
**Create the Spreadsheet**
1. Create a Spreadsheet on Google
2. Invite the account in `client_email` (from the Gsheet credential JSON file above) to the Spreadsheet (Editor permission)

3. Get the `SPREADSHEET_KEY` (it is part of the Spreadsheet URL itself): `https://docs.google.com/spreadsheets/d/<SPREADSHEET_KEY>/edit#gid=0`

```
# ACCESS GOOGLE SHEET
sheet_index_no = 0
spreadsheet_key = '1aoQDapFVNtZxqQemsO6hVs_x0DoT4kmNMIJKoeg2GpE' # input SPREADSHEET_KEY HERE
sh = gc.open_by_key(spreadsheet_key)
worksheet = sh.get_worksheet(sheet_index_no) #-> 0 - first sheet, 1 - second sheet etc.
# APPEND DATA TO SHEET
set_with_dataframe(worksheet, user_df) #-> Upload user_df to the first sheet in the Spreadsheet
# DONE: Now you can open the spreadsheet and check that the content has been updated
```

-------------- END OF OPTION 1 ---------------------
## Option 2: Who has submitted?
### Slack API: Channel List
```
## Call the Slack API to list all the channels
endpoint = "https://slack.com/api/conversations.list"
headers = {"Authorization": "Bearer {}".format(os.environ['SLACK_BEARER_TOKEN'])}
response = requests.post(endpoint, headers=headers).json()
channel_ls = response['channels']
for channel in channel_ls:
print(channel.get('name',0))
channel_ls[0] ## Extract the first record to inspect the schema => name: general, id: C01B4PVGLVB
```
### TODO#3
* Find the id of the #atom-assignment2 channel
```
for channel in channel_ls:
if channel['name'] == "atom-assignment2":
print(channel['id'])
```
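An equivalent, slightly more reusable sketch builds a name-to-id lookup once (assuming channel names are unique) and then indexes into it:
```
# Build a {channel_name: channel_id} lookup from the channel list
channel_id_by_name = {c['name']: c['id'] for c in channel_ls}
print(channel_id_by_name.get('atom-assignment2'))
```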
### Slack API: List the messages in a channel
```
endpoint = "https://slack.com/api/conversations.history"
data = {"channel": "C01U6P7LZ8F"} ## This is ID of assignment#1 channel
headers = {"Authorization": "Bearer {}".format(os.environ['SLACK_BEARER_TOKEN'])}
response_json = requests.post(endpoint, data=data, headers=headers).json()
msg_ls = response_json['messages']
msg_ls[21]
not_learners_id = ['U01BE2PR6LU']
not_learners_id = ['U01BE2PR6LU'] # -> Remove MA from the user_id
github, reply_count, reply_users_count, reply_users, latest_reply = '','','','',''
for i in range(20):
    ts = dt.fromtimestamp(float(msg_ls[i]['ts'])) # -> Convert the Epoch timestamp into a readable datetime
    user = msg_ls[i]['user'] # -> Get the user who posted the message
    if msg_ls[i]['user'] not in not_learners_id:
        if 'attachments' in msg_ls[i].keys():
            #print(msg_ls[i].keys())
            text = msg_ls[i]['text']
            github_link = re.findall('(?:https?://)?(?:www[.])?github[.]com/[\w-]+/?', text) #-> Submissions are the messages that contain a github link
            #print(msg_ls[i])
            if len(github_link) > 0: github = github_link[0]
        if 'reply_count' in msg_ls[i].keys(): reply_count = msg_ls[i]['reply_count'] #-> Extract the number of reviews
if 'reply_users_count' in msg_ls[i].keys(): reply_users_count = msg_ls[i]['reply_users_count']
if 'reply_users' in msg_ls[i].keys(): reply_users = msg_ls[i]['reply_users']
if 'latest_reply' in msg_ls[i].keys(): latest_reply = dt.fromtimestamp(float(msg_ls[i]['latest_reply']))
print(ts, user, reply_users_count, reply_users, latest_reply, github)
endpoint = "https://slack.com/api/conversations.history"
data = {"channel": "C01U6P7LZ8F"} ## This is ID of assignment1 channel
headers = {"Authorization": "Bearer {}".format(os.environ['SLACK_BEARER_TOKEN'])}
response_json = requests.post(endpoint, data=data, headers=headers).json()
msg_ls = response_json['messages'] # store all messages from the assignment1 channel in msg_ls
not_learners_id = ['U01BE2PR6LU'] # -> Remove MA from the user_id
review = {'time':[],'user_id':[],'user_name':[],'reply_count':[],'reply_user_count':[],'reply_users':[],'reply_users_name':[],'lastest_reply':[],'github':[],'colab':[],'sheet':[]} # Initialize the review dict
for msg in msg_ls: # Loop over each message in assignment1
    ts = dt.fromtimestamp(float(msg['ts'])) # Time the message was sent
    user = msg['user']
    if msg['user'] not in not_learners_id: # If the sender is a learner (not in the excluded list)
        text = msg['text'] # store the message content in text
        re_github = r'(?:https?://)?(?:www[.])?github[.]com/(?:[a-zA-Z]|[0-9]|[#$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+' # Regex for Github links
        github_link = re.findall(re_github, text) # Find Github links in the message
        re_gsheet = r'(?:https?://)?(?:www[.])?docs[.]google[.]com/(?:[a-zA-Z]|[0-9]|[$#-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+' # Regex for Google Sheets links
        sheet_link = re.findall(re_gsheet,text) # Find Gsheet links in the message
        re_colab = r'(?:https?://)?(?:www[.])?colab[.]research[.]google[.]com/(?:[a-zA-Z]|[0-9]|[$#-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
        colab_link = re.findall(re_colab,text) # Find Colab links in the message
        if len(github_link) > 0 or len(sheet_link) > 0 or len(colab_link) > 0: # If the message contains a Github, Gsheet or Colab link
            if len(github_link) > 0: # If there is a Github link, store it in github
                github = github_link[0]
            else:
                github = 'N/A' # Otherwise store N/A in github
            if len(sheet_link) > 0:
                sheet = sheet_link[0]
            else:
                sheet = 'N/A'
            if len(colab_link) > 0:
                colab = colab_link[0]
            else:
                colab = 'N/A'
            review['github'].append(github) # append the extracted links
            review['colab'].append(colab)
            review['sheet'].append(sheet)
            if 'reply_count' in msg.keys():
                review['reply_count'].append(msg['reply_count'])
            else:
                review['reply_count'].append(0)
            if 'reply_users_count' in msg.keys():
                review['reply_user_count'].append(msg['reply_users_count'])
            else:
                review['reply_user_count'].append(0)
            if 'reply_users' in msg.keys():
                review['reply_users'].append(msg['reply_users'])
            else:
                review['reply_users'].append('N/A')
            if 'latest_reply' in msg.keys():
                review['lastest_reply'].append(dt.fromtimestamp(float(msg['latest_reply'])))
            else:
                review['lastest_reply'].append('N/A')
            review['time'].append(ts)
            review['user_id'].append(user)
            for i in range(len(user_dict['user_id'])):
                if user_dict['user_id'][i] == user:
                    review['user_name'].append(user_dict['real_name'][i])
            if 'reply_users' in msg.keys():
                l=[]
                for user in msg['reply_users']:
                    for i in range(len(user_dict['user_id'])):
                        if user_dict['user_id'][i] == user:
                            l.append(user_dict['real_name'][i])
                review['reply_users_name'].append(l)
            else:
                review['reply_users_name'].append('N/A')
reply = pd.DataFrame(review)
sh = gc.open_by_key(spreadsheet_key)
worksheet = sh.get_worksheet(1)
set_with_dataframe(worksheet,reply)
for channel in channel_ls:
if channel['name'] == "atom-assignment2":
print(channel['id'])
endpoint = "https://slack.com/api/conversations.history"
data = {"channel": "C021FSDN7LJ"} ## This is ID of assignment1 channel
headers = {"Authorization": "Bearer {}".format(os.environ['SLACK_BEARER_TOKEN'])}
response_json = requests.post(endpoint, data=data, headers=headers).json()
msg_ls = response_json['messages'] # store all messages from the atom-assignment2 channel in msg_ls
not_learners_id = ['U01BE2PR6LU'] # -> Remove MA from the user_id
review = {'time':[],'user_id':[],'user_name':[],'reply_count':[],'reply_user_count':[],'reply_users':[],'reply_users_name':[],'lastest_reply':[],'github':[],'colab':[],'sheet':[]} # Initialize the review dict
for msg in msg_ls: # Loop over each message in atom-assignment2
    ts = dt.fromtimestamp(float(msg['ts'])) # Time the message was sent
    user = msg['user']
    if msg['user'] not in not_learners_id: # If the sender is a learner (not in the excluded list)
        text = msg['text'] # store the message content in text
        re_github = r'(?:https?://)?(?:www[.])?github[.]com/(?:[a-zA-Z]|[0-9]|[#$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+' # Regex for Github links
        github_link = re.findall(re_github, text) # Find Github links in the message
        re_gsheet = r'(?:https?://)?(?:www[.])?docs[.]google[.]com/(?:[a-zA-Z]|[0-9]|[$#-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+' # Regex for Google Sheets links
        sheet_link = re.findall(re_gsheet,text) # Find Gsheet links in the message
        re_colab = r'(?:https?://)?(?:www[.])?colab[.]research[.]google[.]com/(?:[a-zA-Z]|[0-9]|[$#-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
        colab_link = re.findall(re_colab,text) # Find Colab links in the message
        if len(github_link) > 0 or len(sheet_link) > 0 or len(colab_link) > 0: # If the message contains a Github, Gsheet or Colab link
            if len(github_link) > 0: # If there is a Github link, store it in github
                github = github_link[0]
            else:
                github = 'N/A' # Otherwise store N/A in github
            if len(sheet_link) > 0:
                sheet = sheet_link[0]
            else:
                sheet = 'N/A'
            if len(colab_link) > 0:
                colab = colab_link[0]
            else:
                colab = 'N/A'
            review['github'].append(github) # append the extracted links
            review['colab'].append(colab)
            review['sheet'].append(sheet)
            if 'reply_count' in msg.keys():
                review['reply_count'].append(msg['reply_count'])
            else:
                review['reply_count'].append(0)
            if 'reply_users_count' in msg.keys():
                review['reply_user_count'].append(msg['reply_users_count'])
            else:
                review['reply_user_count'].append(0)
            if 'reply_users' in msg.keys():
                review['reply_users'].append(msg['reply_users'])
            else:
                review['reply_users'].append('N/A')
            if 'latest_reply' in msg.keys():
                review['lastest_reply'].append(dt.fromtimestamp(float(msg['latest_reply'])))
            else:
                review['lastest_reply'].append('N/A')
            review['time'].append(ts)
            review['user_id'].append(user)
            for i in range(len(user_dict['user_id'])):
                if user_dict['user_id'][i] == user:
                    review['user_name'].append(user_dict['real_name'][i])
            if 'reply_users' in msg.keys():
                l=[]
                for user in msg['reply_users']:
                    for i in range(len(user_dict['user_id'])):
                        if user_dict['user_id'][i] == user:
                            l.append(user_dict['real_name'][i])
                review['reply_users_name'].append(l)
            else:
                review['reply_users_name'].append('N/A')
reply = pd.DataFrame(review)
sh = gc.open_by_key(spreadsheet_key)
worksheet = sh.get_worksheet(2)
set_with_dataframe(worksheet,reply)
```
### TODO#4
* Build a table containing the information above and update it to the Spreadsheet (Sheet: Assignment#2 Submission)
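The cell above already builds the `reply` table and writes it to the third sheet by index; a hedged sketch that addresses the sheet by name instead (the sheet title `Assignment#2 Submission` is an assumption and must already exist in the spreadsheet) could be:
```
# Write the submissions table to a sheet addressed by title rather than by index
sh = gc.open_by_key(spreadsheet_key)
worksheet = sh.worksheet('Assignment#2 Submission')  # assumed sheet title; create it first
set_with_dataframe(worksheet, reply)
```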
-------------- END OF OPTION 2 ---------------------
# N-charge system
- The electric field of a single point charge (in Gaussian units) is given by:
\\( \vec{E} = q \, \frac{\hat{r}}{r^{2}} \\)
- The electric potential: \\( V = q \cdot \frac{1}{r} \\).
- In Cartesian coordinates: \\( \frac{1}{r} = \frac{1}{\sqrt{(x-x^{'})^{2} + (y - y^{'})^{2}}} \\).
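- By superposition, for \\( N \\) charges \\( q_{i} \\) at \\( (x_{i}, y_{i}) \\) the total potential at a field point is \\( V(x,y) = \sum_{i=1}^{N} \frac{q_{i}}{\sqrt{(x-x_{i})^{2} + (y-y_{i})^{2}}} \\), which is exactly the sum the `V_total` function below evaluates.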
```
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import seaborn as sns
sns.set()
```
#### Class Charge
Let's create a class ```Charge``` with a method ```line``` to calculate the distance between the source and the field point, and a method ```V_point_charge``` to calculate the electric potential at a field point ```x,y``` due to the source point at ```pos```.
```
class Charge(object):
    '''Data encapsulation'''
def __init__(self, q, pos):
self.q = q
self.pos = pos
def line(self, x,y):
'''create a vector from charge to observation point'''
self.vector = [x-self.pos[0],y-self.pos[1]]
'''norm of the vector'''
self.norm = np.sqrt((self.vector[0])**2+(self.vector[1])**2)
def V_point_charge(self, x, y):
'''recall length'''
self.line(x,y)
'''Make sure to exclude source itself'''
if self.norm > 0:
self.V = self.q/self.norm
'''if length is zero, set V equal to 0'''
elif self.norm == 0:
self.V = 0
return self.V
```
#### Example :
Let's use a charge ```q = 100``` at position ```x =1``` and ```y =1``` to find the electric potential at different points in 2D
```
C = Charge(100, [1,1])
for x in range(3):
for y in range(3):
print(x,y, "|", C.V_point_charge(x, y))
```
#### Total Electric potential
Total electric potential at a point ```x,y``` is the sum of the contributions of all the charges defined with the ```Charge``` class.
```
def V_total(x, y, charges):
V = 0
for C in charges:
Vp = C.V_point_charge(x, y)
V = V+Vp
return V
```
- Example: Let's use a collection of charges to find the electric potential at the point x = 4, y = 4
```
sample_charges = [Charge(q = 20, pos = [23,34]),
Charge(q = 25, pos = [13,48]),
Charge(q = 40, pos = [3,14]),
Charge(q = 80, pos = [88,60])]
V_total(x=4, y=4, charges = sample_charges)
```
#### Lattice of charges (```scatter```)
Now we are going to use the ```Charge``` class to define a charge distribution and calculate the electric potential at several points.
- To create a lattice of charges.
```
'''Charges sit on a 5x5 grid with 20-unit spacing, starting at x=0, y=0; the first charge is q=+100'''
q = 100
'''List of dicts to record each charge and its x and y coordinates'''
Qd = []
'''List to collect Charge objects'''
charges = []
'''use for loops to construct the collection of charge objects'''
for i in range(5):
    for j in range(5):
        '''Collecting charges and their coordinates'''
        Qd.append({"q": q, "x": i*20, "y":j*20})
        '''charge objects are being collected'''
        charges.append(Charge(q , [20*i, 20*j]))
        '''change the sign of the charge alternately'''
        q = -q
```
- To visualize lattice of charges
```
'''Plot the lattice of charges'''
plt.figure(figsize = [8,6])
for item in Qd:
    '''Scatter as a red dot if the charge is positive'''
if item['q']> 0:
plt.scatter(item['x'], item['y'], c = 'r',s =100)
'''Scatter as blue dot if charge is negative'''
else:
plt.scatter(item['x'], item['y'], c = 'b',s =100)
plt.xlabel("X-axis")
plt.ylabel("Y-asis")
plt.show()
```
#### Electric Potential (```heatmap```)
- To find the electric potential at several points due to the lattice of charges
```
'''Create X and Y coordinate'''
X = np.arange(-10,110,2)
Y = np.arange(-10,110,2)
'''Initiate vacant V-list of list'''
V = [[0.0 for i in range(len(X))] for j in range(len(Y))]
'''Calculate Electric potential at each x,y coordinate'''
for i,x in enumerate(X):
for j,y in enumerate(Y):
v = V_total(x, y, charges)
V[i][j] = v
VV = np.array(V)
```
- To plot Electric potential
```
plt.figure(figsize = [12,10])
sns.heatmap(VV,annot=False,cmap='YlGnBu')
plt.xlabel("X-axis")
plt.ylabel("Y-axis")
plt.title("Electric potential of lattice of charges")
plt.show()
```
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import emcee, corner
import starry
import astropy.units as u
from astropy.modeling.blackbody import blackbody_lambda
# Planet dayside effective temperature
teff = 2349 * u.K
wl_irac1, tr_irac1 = np.loadtxt('filters/Spitzer_IRAC.I1.txt', unpack=True)
wl_irac2, tr_irac2 = np.loadtxt('filters/Spitzer_IRAC.I2.txt', unpack=True)
wl_irac1 /= 10000
wl_irac2 /= 10000
wl_irac1 = wl_irac1 * u.um
wl_irac2 = wl_irac2 * u.um
wl = np.linspace(3, 5.5, 100) * u.um
bb = blackbody_lambda(wl, teff) * wl
plt.plot(wl_irac1, tr_irac1, label='IRAC 1')
plt.plot(wl_irac2, tr_irac2, label='IRAC 2')
plt.plot(wl, bb/bb.max(), color='k', label='BB = {0}'.format(teff))
plt.legend()
plt.axvline(3.15)
plt.axvline(3.95)
plt.axvline(4.0)
plt.axvline(5.0)
plt.grid()
plt.show()
flux_irac2 = np.trapz(blackbody_lambda(wl_irac2, teff), wl_irac2)
flux_irac1 = np.trapz(blackbody_lambda(wl_irac1, teff), wl_irac1)
flux_ratio = flux_irac1 / flux_irac2
print(flux_ratio)
lmax = 1
lambda0 = 90
r = 0.0187
L_45 = 197e-6 * 1.1/2
L_36 = flux_ratio * L_45
inc = 83.3
a = 3.517
porb = 0.736539
prot = porb
tref = -porb / 2.
# Instantiate the star
star = starry.kepler.Primary()
star[1] = 0.0783
star[2] = 0.1407
# Instantiate the planet
planet = starry.kepler.Secondary(lmax=lmax)
planet.lambda0 = lambda0
planet.r = r
planet.L = L_45  # note: a bare 'L' is undefined at this point; start from the 4.5 micron value (set_coeffs() overrides it in the loop below)
planet.inc = inc
planet.a = a
planet.prot = prot
planet.porb = porb
planet.tref = tref
# Instantiate the system
system = starry.kepler.System(star, planet)
def set_coeffs(p, planet):
"""Set the coefficients of the planet map."""
y1m1, y10, y11, L = p
planet[1, :] = [y1m1, y10, y11]
planet.L = L
def hotspot_offset(p):
"""Calculate the latitude and longitude of the hot spot in degrees"""
x = p[2] / np.sqrt(np.sum(p[:3] ** 2))
y = p[0] / np.sqrt(np.sum(p[:3] ** 2))
z = p[1] / np.sqrt(np.sum(p[:3] ** 2))
lat = np.degrees(np.arcsin(y))
lon = np.degrees(np.arccos(z / np.sqrt(1 - y ** 2)))
return lat, lon
norm = 1
p0 = 0
p1 = 0.53
p2 = 1 - p1
times = np.linspace(-3/4*porb, 1/4*porb, 1000)
for L, label, ls in zip([L_36, L_45],
["3.6 $\mu$m", "4.5 $\mu$m"],
['-', '--']):
c = np.array([p0, p1, p2, L])
set_coeffs(c, planet)
print("Hot spot (lat, lon):", hotspot_offset(c))
system.compute(times)
flux = system.lightcurve
mid_eclipse_flux = flux[int(3/4*len(times))]
flux = 1e6 * (flux - mid_eclipse_flux)
print("amp:", flux.max())
plt.plot(times / planet.porb + 0.5, flux, label=label, ls=ls)
plt.legend()
plt.xlabel('Phase')
plt.ylabel('Flux [ppm]')
for s in ['right', 'top']:
plt.gca().spines[s].set_visible(False)
plt.grid(ls='--')
plt.savefig('plots/predicted_flux.pdf', bbox_inches='tight', dpi=300)
plt.show()
planet.show()
```
# TensorFlow Distributed Training & Inference
For use cases involving large datasets, particularly those where the data is images, it often is necessary to perform distributed training on a cluster of multiple machines. Similarly, when it is time to set up an inference workflow, it also may be necessary to perform highly performant batch inference using a cluster. In this notebook, we'll examine distributed training and distributed inference with TensorFlow in Amazon SageMaker.
The model used for this notebook is a basic Convolutional Neural Network (CNN) based on [the Keras examples](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py). We'll train the CNN to classify images using the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html), a well-known computer vision dataset. It consists of 60,000 32x32 images belonging to 10 different classes (6,000 images per class). Here is a graphic of the classes in the dataset, as well as 10 random images from each:

## Setup
We'll begin with some necessary imports, and get an Amazon SageMaker session to help perform certain tasks, as well as an IAM role with the necessary permissions.
```
%matplotlib inline
import numpy as np
import os
import sagemaker
from sagemaker import get_execution_role
os.system("aws s3 cp s3://sagemaker-workshop-pdx/cifar-10-module . --recursive")
sagemaker_session = sagemaker.Session()
role = get_execution_role()
bucket = sagemaker_session.default_bucket()
prefix = 'sagemaker/DEMO-tf-horovod-inference'
print('Bucket:\n{}'.format(bucket))
```
Now we'll run a script that fetches the dataset and converts it to the TFRecord format, which provides several conveniences for training models in TensorFlow.
```
!python generate_cifar10_tfrecords.py --data-dir ./data
```
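The conversion script itself is not reproduced in this notebook. As a rough sketch of what TFRecord serialization involves (illustrative feature names and TensorFlow 1.x APIs; the actual `generate_cifar10_tfrecords.py` may differ), each image/label pair is wrapped in a `tf.train.Example` and written out with a record writer:
```
import tensorflow as tf

def write_tfrecords(images, labels, filename):
    # Sketch: serialize (image, label) pairs into a single TFRecord file
    with tf.python_io.TFRecordWriter(filename) as writer:
        for image, label in zip(images, labels):
            example = tf.train.Example(features=tf.train.Features(feature={
                'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image.tobytes()])),
                'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),
            }))
            writer.write(example.SerializeToString())
```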
For Amazon SageMaker hosted training on a cluster separate from this notebook instance, training data must be stored in Amazon S3, so we'll upload the data to S3 now.
```
inputs = sagemaker_session.upload_data(path='data', key_prefix='data/DEMO-cifar10-tf')
display(inputs)
```
## Distributed training with Horovod
Sometimes it makes sense to perform training on a single machine. For large datasets, however, it may be necessary to perform distributed training on a cluster of multiple machines. In fact, it may be not only faster but cheaper to do distributed training on several machines rather than one machine. Fortunately, Amazon SageMaker makes it easy to run distributed training without having to manage cluster setup and tear down. Distributed training can be done on a cluster of multiple machines using either parameter servers or Ring-AllReduce with Horovod.
Horovod is an open source distributed training framework for TensorFlow, Keras, PyTorch, and MXNet. It is an alternative to the more "traditional" parameter server method of performing distributed training. In Amazon SageMaker, Horovod is only available with TensorFlow version 1.12 or newer. Only a few lines of code are necessary to use Horovod for distributed training of a Keras model defined by the tf.keras API. For details, see the `train.py` script included with this notebook; the changes primarily relate to:
- importing Horovod.
- initializing Horovod.
- configuring GPU options and setting a Keras/tf.session with those options (a minimal sketch of these changes appears below).
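The bullet points above map to only a handful of lines in the training script. The following is a minimal sketch of those changes, not the exact `train.py` shipped with this notebook:
```
import tensorflow as tf
import horovod.tensorflow.keras as hvd

# Initialize Horovod and pin each process to a single GPU
hvd.init()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
tf.keras.backend.set_session(tf.Session(config=config))

# Scale the learning rate by the number of workers and wrap the optimizer
opt = tf.keras.optimizers.SGD(lr=0.01 * hvd.size())
opt = hvd.DistributedOptimizer(opt)

# Broadcast initial weights from rank 0 so all workers start identically
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0)]
```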
Once we have a training script, the next step is to set up an Amazon SageMaker TensorFlow Estimator object with the details of the training job. It is very similar to an Estimator for training on a single machine, except we specify a `distributions` parameter describing Horovod attributes such as the number of process per host, which is set here to the number of GPUs per machine. Beyond these few simple parameters and the few lines of code in the training script, there is nothing else you need to do to use distributed training with Horovod; Amazon SageMaker handles the heavy lifting for you and manages the underlying cluster setup.
```
from sagemaker.tensorflow import TensorFlow
hvd_instance_type = 'ml.p3.8xlarge'
hvd_processes_per_host = 4
hvd_instance_count = 2
distributions = {'mpi': {
'enabled': True,
'processes_per_host': hvd_processes_per_host
}
}
hyperparameters = {'epochs': 60, 'batch-size' : 256}
estimator_dist = TensorFlow(base_job_name='dist-cifar10-tf',
source_dir='code',
entry_point='train.py',
role=role,
framework_version='1.12.0',
py_version='py3',
hyperparameters=hyperparameters,
train_instance_count=hvd_instance_count,
train_instance_type=hvd_instance_type,
tags = [{'Key' : 'Project', 'Value' : 'cifar10'},{'Key' : 'TensorBoard', 'Value' : 'dist'}],
distributions=distributions)
```
Now we can call the `fit` method of the Estimator object to start training. After training completes, the tf.keras model will be saved in the SavedModel .pb format so it can be served by a TensorFlow Serving container. Note that the model is only saved by the master (rank 0) process; disregard any warnings about the model not being saved by all the processes.
```
remote_inputs = {'train' : inputs+'/train', 'validation' : inputs+'/validation', 'eval' : inputs+'/eval'}
estimator_dist.fit(remote_inputs, wait=True)
```
## Model Deployment with Amazon Elastic Inference
Amazon SageMaker provides both real time inference and batch inference. Although we will focus on batch inference below, let's start with a quick overview of setting up an Amazon SageMaker hosted endpoint for real time inference with TensorFlow Serving (TFS). The processes for setting up hosted endpoints and Batch Transform jobs have significant differences. Additionally, we will discuss why and how to use Amazon Elastic Inference with the hosted endpoint.
### Deploying the Model
When considering the overall cost of a machine learning workload, inference often is the largest part, up to 90% of the total. If a GPU instance type is used for real time inference, it typically is not fully utilized because, unlike training, real time inference does not involve continuously inputting large batches of data to the model. Elastic Inference provides GPU acceleration suited for inference, allowing you to add inference acceleration to a hosted endpoint for a fraction of the cost of using a full GPU instance.
The `deploy` method of the Estimator object creates an endpoint which serves prediction requests in near real time. To utilize Elastic Inference with the SageMaker TFS container, simply provide an `accelerator_type` parameter, which determines the type of accelerator that is attached to your endpoint. Refer to the **Inference Acceleration** section of the [instance types chart](https://aws.amazon.com/sagemaker/pricing/instance-types) for a listing of the supported types of accelerators.
Here we'll use a general purpose CPU compute instance type along with an Elastic Inference accelerator: together they are much cheaper than the smallest P3 GPU instance type.
```
predictor = estimator_dist.deploy(initial_instance_count=1,
instance_type='ml.m5.xlarge',
accelerator_type='ml.eia1.medium')
```
### Real time inference
Now that we have a Predictor object wrapping a real time Amazon SageMaker hosted endpoint, we'll define the label names and look at a sample of 10 images, one from each class.
```
from IPython.display import Image, display
labels = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']
images = []
for entry in os.scandir('sample-img'):
if entry.is_file() and entry.name.endswith("png"):
images.append('sample-img/' + entry.name)
for image in images:
display(Image(image))
```
Next we'll set up the Predictor object created by the `deploy` method call above. The TFS container in Amazon SageMaker by default uses the TFS REST API, which requires requests in a specific JSON format. However, for many use cases involving image data it is more convenient to have the client application send the image data directly to a real time endpoint for predictions without converting and preprocessing it on the client side.
Fortunately, the Amazon SageMaker TFS container provides a data pre/post-processing feature that allows you to simply supply a data transformation script to accomplish this. We'll discuss this feature more in the Batch Transform section of this notebook. For now, observe in the code cell below that with a preprocessing script in place, we just specify the Predictor's content type as `application/x-image` and override the default serializer, then we can simply provide the raw .png image bytes to the Predictor.
```
predictor.content_type = 'application/x-image'
predictor.serializer = None
labels = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']
def get_prediction(file_path):
with open(file_path, "rb") as image:
f = image.read()
b = bytearray(f)
return labels[np.argmax(predictor.predict(b)['predictions'], axis=1)[0]]
predictions = [get_prediction(image) for image in images]
print(predictions)
```
## Batch Transform with TFS pre/post-processing scripts
If a use case does not require individual predictions in near real-time, an Amazon SageMaker Batch Transform job is likely a better alternative. Although hosted endpoints also can be used for pseudo-batch prediction, the process is more involved than using the alternative Batch Transform, which is designed for large-scale, asynchronous batch inference.
A typical problem in working with batch inference is how to convert data into tensors that can be input to the model. For example, image data in .png or .jpg format cannot be directly input to a model, but rather must be converted first. Additionally, sometimes other preprocessing of the data must be performed, such as resizing. The Amazon SageMaker TFS container provides facilities for doing this efficiently.
### Pre/post-processing script
As mentioned above, the TFS container in Amazon SageMaker by default uses the REST API to serve prediction requests. This requires the input data to be converted to JSON format. One way to do this is to create a Docker container to do the conversion, then create an overall Amazon SageMaker model that links the conversion container to the TensorFlow Serving container with the model. This is known as an Amazon SageMaker Inference Pipeline, as demonstrated in another [sample notebook](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/advanced_functionality/working_with_tfrecords).
However, as a more convenient alternative for many use cases, the Amazon SageMaker TFS container provides a data pre/post-processing script feature that allows you to simply supply a data transformation script. Using such a script, there is no need to build containers or directly work with Docker. The simplest form of a script must only implement an `input_handler` and `output_handler` interface, as shown in the code below, be named `inference.py`, and be placed in a `/code` directory.
```
!cat ./code/inference.py
```
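The contents of `inference.py` are not shown in this rendered output, so here is a minimal sketch of what a handler script with this interface might look like. The resizing and normalization shown are illustrative assumptions, not necessarily what the actual script does:
```
import io
import json
import numpy as np
from PIL import Image

def input_handler(data, context):
    # Sketch: turn raw image bytes into the TFS REST "instances" JSON format
    if context.request_content_type == 'application/x-image':
        image = Image.open(io.BytesIO(data.read())).resize((32, 32))
        instance = (np.asarray(image) / 255.0).tolist()
        return json.dumps({'instances': [instance]})
    raise ValueError('Unsupported content type: {}'.format(context.request_content_type))

def output_handler(response, context):
    # Sketch: pass the TFS predictions through unchanged
    return response.content, context.accept_header
```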
On the input preprocessing side, the code takes an image read from Amazon S3 and converts it to the required TFS REST API input format. On the output postprocessing side, the script simply passes through the predictions in the standard TFS format without modifying them. Alternatively, we could have just returned a class label for the class with the highest score, or performed other postprocessing that would be helpful to the application consuming the predictions.
### Requirements.txt
Besides an `inference.py` script implementing the handler interface, it also may be necessary to supply a `requirements.txt` file to ensure any necessary dependencies are installed in the container along with the script. For this script, in addition to the Python standard libraries we need the Pillow and Numpy libraries.
```
!cat ./code/requirements.txt
```
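The file contents are not shown in this rendered output; given the dependencies mentioned above, a minimal version would contain little more than:
```
numpy
pillow
```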
### Create GPU Model
When we deployed the model above to an Amazon SageMaker real time endpoint, we deployed to a CPU-based instance type, along with an attached Elastic Inference accelerator to which parts of the model computation graph are offloaded. Under the hood a CPU-based Amazon SageMaker Model object was created to wrap a CPU-based TFS container. However, for Batch Transform on a large dataset, we would prefer to use full GPU instances. To do this, we need to create another Model object that will utilize a GPU-based TFS container.
```
import boto3
from sagemaker.tensorflow.serving import Model
from time import gmtime, strftime
client = boto3.client('sagemaker')
model_name = "dist-cifar10-tf-gpu-{}".format(strftime("%d-%H-%M-%S", gmtime()))
estimator = estimator_dist
tf_serving_model = Model(model_data=estimator.model_data,
role=sagemaker.get_execution_role(),
image=estimator.image_name,
framework_version=estimator.framework_version,
sagemaker_session=estimator.sagemaker_session)
batch_instance_type = 'ml.p3.2xlarge'
tf_serving_container = tf_serving_model.prepare_container_def(batch_instance_type)
model_params = {
"ModelName": model_name,
"Containers": [
tf_serving_container
],
"ExecutionRoleArn": sagemaker.get_execution_role()
}
client.create_model(**model_params)
```
### Run a Batch Transform job
Next, we'll run a Batch Transform job using our data processing script and GPU-based Amazon SageMaker Model. More specifically, we'll perform distributed inference on a cluster of two instances. As an additional optimization, we'll set the `max_concurrent_transforms` parameter of the Transformer object, which controls the maximum number of parallel requests that can be sent to each instance in a transform job.
```
input_data_path = 's3://sagemaker-sample-data-{}/tensorflow/cifar10/images/png'.format(sagemaker_session.boto_region_name)
output_data_path = 's3://{}/{}/{}'.format(bucket, prefix, 'batch-predictions')
batch_instance_count = 2
concurrency = 100
transformer = sagemaker.transformer.Transformer(
model_name = model_name,
instance_count = batch_instance_count,
instance_type = batch_instance_type,
max_concurrent_transforms = concurrency,
strategy = 'MultiRecord',
output_path = output_data_path,
assemble_with= 'Line',
base_transform_job_name='cifar-10-image-transform',
sagemaker_session=sagemaker_session,
)
transformer.transform(data = input_data_path, content_type = 'application/x-image')
transformer.wait()
```
### Inspect Batch Transform output
Finally, we can inspect the output files of our Batch Transform job to see the predictions. First we'll download the prediction files locally, then extract the predictions from them.
```
!aws s3 cp --quiet --recursive $transformer.output_path ./batch_predictions
import json
import re
total = 0
correct = 0
predicted = []
actual = []
for entry in os.scandir('batch_predictions'):
try:
if entry.is_file() and entry.name.endswith("out"):
with open(entry, 'r') as f:
jstr = json.load(f)
results = [float('%.3f'%(item)) for sublist in jstr['predictions'] for item in sublist]
class_index = np.argmax(np.array(results))
predicted_label = labels[class_index]
predicted.append(predicted_label)
actual_label = re.search('([a-zA-Z]+).png.out', entry.name).group(1)
actual.append(actual_label)
is_correct = (predicted_label in entry.name) or False
if is_correct:
correct += 1
total += 1
except Exception as e:
print(e)
continue
```
Let's calculate the accuracy of the predictions.
```
print('Out of {} total images, accurate predictions were returned for {}'.format(total, correct))
accuracy = correct / total
print('Accuracy is {:.1%}'.format(accuracy))
```
The accuracy from the batch transform job on 10000 test images never seen during training is fairly close to the accuracy achieved during training on the validation set. This is an indication that the model is not overfitting and should generalize fairly well to other unseen data.
Next we'll plot a confusion matrix, which is a tool for visualizing the performance of a multiclass model. It has entries for all possible combinations of correct and incorrect predictions, and shows how often each one was made by our model. Ours will be row-normalized: each row sums to one, so that entries along the diagonal correspond to recall.
```
import pandas as pd
import seaborn as sns
confusion_matrix = pd.crosstab(pd.Series(actual), pd.Series(predicted), rownames=['Actuals'], colnames=['Predictions'], normalize='index')
sns.heatmap(confusion_matrix, annot=True, fmt='.2f', cmap="YlGnBu").set_title('Confusion Matrix')
```
If our model had 100% accuracy, and therefore 100% recall in every class, then all of the predictions would fall along the diagonal of the confusion matrix. Here our model definitely is not 100% accurate, but manages to achieve good recall for most of the classes, though it performs worse for some classes, such as cats.
# Extensions
Although we did not demonstrate them in this notebook, Amazon SageMaker provides additional ways to make distributed training more efficient for very large datasets:
- **VPC training**: performing Horovod training inside a VPC improves the network latency between nodes, leading to higher performance and stability of Horovod training jobs.
- **Pipe Mode**: using [Pipe Mode](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-inputdataconfig) reduces startup and training times. Pipe Mode streams training data from S3 as a Linux FIFO directly to the algorithm, without saving to disk. For a small dataset such as CIFAR-10, Pipe Mode does not provide any advantage, but for very large datasets where training is I/O bound rather than CPU/GPU bound, it can substantially reduce startup and training times. The Estimator-level change it requires is minimal, as sketched below.
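As an illustration only (nothing in this notebook runs it), and assuming the training script were adapted to read from the pipe (for example with `PipeModeDataset` from the `sagemaker_tensorflow` package), the Estimator-level change amounts to setting `input_mode`:
```
pipe_estimator = TensorFlow(base_job_name='pipe-cifar10-tf',
                            source_dir='code',
                            entry_point='train.py',
                            role=role,
                            framework_version='1.12.0',
                            py_version='py3',
                            hyperparameters=hyperparameters,
                            train_instance_count=2,
                            train_instance_type='ml.p3.2xlarge',
                            input_mode='Pipe')
```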
# Cleanup
To avoid incurring charges due to a stray endpoint, delete the Amazon SageMaker endpoint if you no longer need it:
```
sagemaker_session.delete_endpoint(predictor.endpoint)
```
```
#export
import openai, pandas as pd, numpy as np, datetime, json, time
from OpenAISurveyWrapper import wrapper
#hide
df = pd.read_csv("../vice-yikyak-2020-06-09_vice-random-2020-06-23.csv")
len(df)
openai.api_key = json.load(open("../gpt3/key.json", "r"))["key"]
import time
responses = []
trgts = df[:5000]
startIdx = 0
endIdx = 200
while startIdx < len(trgts):
#responses = responses + trgts[startIdx:endIdx].text.to_list()
responses = responses + openai.Engine("davinci").search(documents=\
trgts[startIdx:endIdx].text.to_list(), query="alcohol")["data"]
startIdx += 200
endIdx += 200
time.sleep(2)
len(responses)
trgts["seach_sim"] = [float(x["score"]) for x in responses]
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > 250 else 0)
trgts[trgts.Drinking==1]
from sklearn.metrics import f1_score
cutoff_f1 = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1.at["f1", i] = f1_score(trgts.Drinking[:1000], trgts.search_pos[:1000])
import matplotlib.pyplot as plt
%matplotlib inline
ax = cutoff_f1.T.rename(columns={"f1":"train"}).plot()
cutoff = cutoff_f1.T.idxmax()[0]
cutoff
cutoff_f1_test = pd.DataFrame()
for i in range(100, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1_test.at["f1", i] = f1_score(trgts.Drinking[1000:], trgts.search_pos[1000:])
ax = cutoff_f1.T.rename(columns={"f1":"train"}).plot()
cutoff_f1_test.T.rename(columns={"f1":"test"}).plot(ax=ax)
cutoff_f1_test[434]
trgts.to_pickle("../alcohol.pkl")
import time
responses = []
trgts = df[:5000]
startIdx = 0
endIdx = 200
while startIdx < len(trgts):
#responses = responses + trgts[startIdx:endIdx].text.to_list()
responses = responses + openai.Engine("davinci").search(documents=\
trgts[startIdx:endIdx].text.to_list(), query="drinking")["data"]
startIdx += 200
endIdx += 200
time.sleep(2)
trgts["seach_sim"] = [float(x["score"]) for x in responses]
cutoff_f1 = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1.at["f1", i] = f1_score(trgts.Drinking[:1000], trgts.search_pos[:1000])
cutoff = cutoff_f1.T.idxmax()[0]
cutoff
cutoff_f1_test = pd.DataFrame()
for i in range(100, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1_test.at["f1", i] = f1_score(trgts.Drinking[1000:], trgts.search_pos[1000:])
ax = cutoff_f1.T.rename(columns={"f1":"train"}).plot()
cutoff_f1_test.T.rename(columns={"f1":"test"}).plot(ax=ax)
trgts.to_pickle("../drinking.pkl")
cutoff_f1_test[448]
```
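The cells above repeat the same sliding-window search call for each query. Purely as an illustration (this helper is not part of the original notebook), the scoring loop could be factored out like this, reusing the same `openai.Engine(...).search` call:
```
def score_query(frame, query, engine="davinci", batch_size=200, pause=2):
    """Hypothetical helper: score every row of frame.text against a query, 200 documents at a time."""
    scores = []
    for start in range(0, len(frame), batch_size):
        docs = frame[start:start + batch_size].text.to_list()
        data = openai.Engine(engine).search(documents=docs, query=query)["data"]
        scores.extend(float(item["score"]) for item in data)
        time.sleep(pause)
    return scores
```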
# wait, try other
```
responses = []
trgts = df[:5000]
startIdx = 0
endIdx = 200
while startIdx < len(trgts):
#responses = responses + trgts[startIdx:endIdx].text.to_list()
responses = responses + openai.Engine("davinci").search(documents=\
trgts[startIdx:endIdx].text.to_list(), query="drinking and alcohol")["data"]
startIdx += 200
endIdx += 200
time.sleep(2)
trgts["seach_sim"] = [float(x["score"]) for x in responses]
cutoff_f1 = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1.at["f1", i] = f1_score(trgts.Drinking[:1000], trgts.search_pos[:1000])
cutoff = cutoff_f1.T.idxmax()[0]
cutoff
cutoff_f1_test = pd.DataFrame()
for i in range(100, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1_test.at["f1", i] = f1_score(trgts.Drinking[1000:], trgts.search_pos[1000:])
ax = cutoff_f1.T.rename(columns={"f1":"train"}).plot()
cutoff_f1_test.T.rename(columns={"f1":"test"}).plot(ax=ax)
trgts.to_pickle("../drinking and alcohol.pkl")
cutoff_f1_test[cutoff]
trgts = pd.read_pickle("../drinking and alcohol.pkl")
trgts
cutoff_f1 = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1.at["f1", i] = f1_score(trgts.Drinking[:1000], trgts.search_pos[:1000])
cutoff = cutoff_f1.T.idxmax()[0]
cutoff_f1_test = pd.DataFrame()
for i in range(100, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1_test.at["f1", i] = f1_score(trgts.Drinking[1000:], trgts.search_pos[1000:])
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > cutoff else 0)
ddd = trgts[1000:]
ddd[ddd["search_pos"] != ddd["Drinking"]]
trgts = df[:5000]
betterTrain = df[5000:]
betterTrainAlcohol = betterTrain[betterTrain.Drinking==1][:100]
betterTrainNoAlcohol = betterTrain[betterTrain.Drinking!=1][:100]
newtrgts = pd.concat([betterTrainAlcohol, betterTrainNoAlcohol, trgts[200:]])
trgts = newtrgts.reset_index(drop=True)
responses = []
startIdx = 0
endIdx = 200
while startIdx < len(trgts):
#responses = responses + trgts[startIdx:endIdx].text.to_list()
responses = responses + openai.Engine("davinci").search(documents=\
trgts[startIdx:endIdx].text.to_list(), query="drinking and alcohol")["data"]
startIdx += 200
endIdx += 200
trgts["seach_sim"] = [float(x["score"]) for x in responses]
cutoff_f1 = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1.at["f1", i] = f1_score(trgts.Drinking[:1000], trgts.search_pos[:1000])
cutoff = cutoff_f1.T.idxmax()[0]
cutoff_f1_test = pd.DataFrame()
for i in range(100, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1_test.at["f1", i] = f1_score(trgts.Drinking[1000:], trgts.search_pos[1000:])
ax = cutoff_f1.T.rename(columns={"f1":"train"}).plot()
cutoff_f1_test.T.rename(columns={"f1":"test"}).plot(ax=ax)
trgts.to_pickle("../drinking and alcohol_smartstart.pkl")
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > cutoff else 0)
cutoff_f1_test[cutoff]
pd.set_option('display.max_colwidth', -1)
trgts[trgts["search_pos"] != trgts["Drinking"]]
```
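The cutoff sweep is also repeated verbatim for every query; a hypothetical helper (again, not in the original notebook) that picks the train-optimal cutoff and reports the held-out F1 might look like:
```
def sweep_cutoff(frame, score_col="seach_sim", label_col="Drinking", train_n=1000, grid=range(0, 700)):
    """Hypothetical helper: choose the cutoff maximizing train F1 and report its test F1."""
    train, test = frame[:train_n], frame[train_n:]
    train_f1 = {c: f1_score(train[label_col], (train[score_col] > c).astype(int)) for c in grid}
    best = max(train_f1, key=train_f1.get)
    test_f1 = f1_score(test[label_col], (test[score_col] > best).astype(int))
    return best, train_f1[best], test_f1
```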
# TRY OR
```
responses = []
trgts = df[:5000]
startIdx = 0
endIdx = 200
while startIdx < len(trgts):
#responses = responses + trgts[startIdx:endIdx].text.to_list()
responses = responses + openai.Engine("davinci").search(documents=\
trgts[startIdx:endIdx].text.to_list(), query="drunk or drinking")["data"]
startIdx += 200
endIdx += 200
time.sleep(2)
trgts["seach_sim"] = [float(x["score"]) for x in responses]
cutoff_f1 = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1.at["f1", i] = f1_score(trgts.Drinking[:1000], trgts.search_pos[:1000])
cutoff = cutoff_f1.T.idxmax()[0]
cutoff
cutoff_f1_test = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1_test.at["f1", i] = f1_score(trgts.Drinking[1000:], trgts.search_pos[1000:])
ax = cutoff_f1.T.rename(columns={"f1":"train"}).plot()
cutoff_f1_test.T.rename(columns={"f1":"test"}).plot(ax=ax)
trgts.to_pickle("../drunk or drinking.pkl")
cutoff_f1_test[cutoff]
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > cutoff else 0)
trgts[trgts["search_pos"] != trgts["Drinking"]]
# Wine, beer, or liquor
responses = []
trgts = df[:5000]
startIdx = 0
endIdx = 200
while startIdx < len(trgts):
#responses = responses + trgts[startIdx:endIdx].text.to_list()
responses = responses + openai.Engine("davinci").search(documents=\
trgts[startIdx:endIdx].text.to_list(), query="wine or beer or liquor")["data"]
startIdx += 200
endIdx += 200
time.sleep(2)
trgts["seach_sim"] = [float(x["score"]) for x in responses]
cutoff_f1 = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1.at["f1", i] = f1_score(trgts.Drinking[:1000], trgts.search_pos[:1000])
cutoff = cutoff_f1.T.idxmax()[0]
cutoff
cutoff_f1_test = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1_test.at["f1", i] = f1_score(trgts.Drinking[1000:], trgts.search_pos[1000:])
ax = cutoff_f1.T.rename(columns={"f1":"train"}).plot()
cutoff_f1_test.T.rename(columns={"f1":"test"}).plot(ax=ax)
trgts.to_pickle("../wine or beer or liquor.pkl")
cutoff_f1_test[cutoff]
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > cutoff else 0)
trgts[trgts["search_pos"] != trgts["Drinking"]]
```
# Drinking Wine and Alcohol
```
responses = []
trgts = df[:5000]
startIdx = 0
endIdx = 200
while startIdx < len(trgts):
#responses = responses + trgts[startIdx:endIdx].text.to_list()
responses = responses + openai.Engine("davinci").search(documents=\
trgts[startIdx:endIdx].text.to_list(), query="drinking wine and alcohol")["data"]
startIdx += 200
endIdx += 200
time.sleep(2)
trgts["seach_sim"] = [float(x["score"]) for x in responses]
cutoff_f1 = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1.at["f1", i] = f1_score(trgts.Drinking[:1000], trgts.search_pos[:1000])
cutoff = cutoff_f1.T.idxmax()[0]
cutoff
cutoff_f1_test = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1_test.at["f1", i] = f1_score(trgts.Drinking[1000:], trgts.search_pos[1000:])
ax = cutoff_f1.T.rename(columns={"f1":"train"}).plot()
cutoff_f1_test.T.rename(columns={"f1":"test"}).plot(ax=ax)
trgts.to_pickle("../drinking wine and alcohol.pkl")
cutoff_f1_test[cutoff]
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > cutoff else 0)
trgts[trgts["search_pos"] != trgts["Drinking"]]
```
# Wine or beer
```
responses = []
trgts = df[:5000]
startIdx = 0
endIdx = 200
while startIdx < len(trgts):
#responses = responses + trgts[startIdx:endIdx].text.to_list()
responses = responses + openai.Engine("davinci").search(documents=\
trgts[startIdx:endIdx].text.to_list(), query="wine or beer")["data"]
startIdx += 200
endIdx += 200
time.sleep(2)
trgts["seach_sim"] = [float(x["score"]) for x in responses]
cutoff_f1 = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1.at["f1", i] = f1_score(trgts.Drinking[:1000], trgts.search_pos[:1000])
cutoff = cutoff_f1.T.idxmax()[0]
cutoff
cutoff_f1_test = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1_test.at["f1", i] = f1_score(trgts.Drinking[1000:], trgts.search_pos[1000:])
ax = cutoff_f1.T.rename(columns={"f1":"train"}).plot()
cutoff_f1_test.T.rename(columns={"f1":"test"}).plot(ax=ax)
trgts.to_pickle("../wine or beer.pkl")
```
# wine beer
```
responses = []
trgts = df[:5000]
startIdx = 0
endIdx = 200
while startIdx < len(trgts):
#responses = responses + trgts[startIdx:endIdx].text.to_list()
responses = responses + openai.Engine("davinci").search(documents=\
trgts[startIdx:endIdx].text.to_list(), query="wine beer")["data"]
startIdx += 200
endIdx += 200
time.sleep(2)
trgts["seach_sim"] = [float(x["score"]) for x in responses]
cutoff_f1 = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1.at["f1", i] = f1_score(trgts.Drinking[:1000], trgts.search_pos[:1000])
cutoff = cutoff_f1.T.idxmax()[0]
cutoff
cutoff_f1_test = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1_test.at["f1", i] = f1_score(trgts.Drinking[1000:], trgts.search_pos[1000:])
ax = cutoff_f1.T.rename(columns={"f1":"train"}).plot()
cutoff_f1_test.T.rename(columns={"f1":"test"}).plot(ax=ax)
trgts.to_pickle("../wine beer.pkl")
responses = []
trgts = df[:5000]
startIdx = 0
endIdx = 200
while startIdx < len(trgts):
#responses = responses + trgts[startIdx:endIdx].text.to_list()
responses = responses + openai.Engine("davinci").search(documents=\
trgts[startIdx:endIdx].text.to_list(), query="wine or beer or alcohol or drunk")["data"]
startIdx += 200
endIdx += 200
time.sleep(2)
trgts["seach_sim"] = [float(x["score"]) for x in responses]
cutoff_f1 = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1.at["f1", i] = f1_score(trgts.Drinking[:1000], trgts.search_pos[:1000])
cutoff = cutoff_f1.T.idxmax()[0]
cutoff
cutoff_f1_test = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1_test.at["f1", i] = f1_score(trgts.Drinking[1000:], trgts.search_pos[1000:])
ax = cutoff_f1.T.rename(columns={"f1":"train"}).plot()
cutoff_f1_test.T.rename(columns={"f1":"test"}).plot(ax=ax)
trgts.to_pickle("../wine or beer or alcohol or drunk.pkl")
cutoff_f1_test[cutoff]
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > cutoff else 0)
trgts[trgts["search_pos"] != trgts["Drinking"]]
```
# Re-run: wine or beer or alcohol or drunk
```
responses = []
trgts = df[:5000]
startIdx = 0
endIdx = 200
while startIdx < len(trgts):
#responses = responses + trgts[startIdx:endIdx].text.to_list()
responses = responses + openai.Engine("davinci").search(documents=\
trgts[startIdx:endIdx].text.to_list(), query="wine or beer or alcohol or drunk")["data"]
startIdx += 200
endIdx += 200
time.sleep(2)
trgts["seach_sim"] = [float(x["score"]) for x in responses]
cutoff_f1 = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1.at["f1", i] = f1_score(trgts.Drinking[:1000], trgts.search_pos[:1000])
cutoff = cutoff_f1.T.idxmax()[0]
cutoff
cutoff_f1_test = pd.DataFrame()
for i in range(0, 700):
trgts["search_pos"] = trgts["seach_sim"].apply(lambda x: 1 if x > i else 0)
cutoff_f1_test.at["f1", i] = f1_score(trgts.Drinking[1000:], trgts.search_pos[1000:])
ax = cutoff_f1.T.rename(columns={"f1":"train"}).plot()
cutoff_f1_test.T.rename(columns={"f1":"test"}).plot(ax=ax)
trgts.to_pickle("../wine or beer or alcohol or drunk.pkl")
```
<!--BOOK_INFORMATION-->
<img style="float: right; width: 100px" src="https://raw.github.com/pyomeca/design/master/logo/logo_cropped.svg?sanitize=true">
<font size="+2">Pyosim in the cloud</font>
<font size="+1">with [pyomeca](https://github.com/pyomeca/pyomeca)</font>
Romain Martinez (martinez.staps@gmail.com | [GitHub](https://github.com/romainmartinez))
<!--NAVIGATION-->
< [Muscle activations & muscles forces](03.03-muscle-activations-forces.ipynb) | [Contents](Index.ipynb) |
# Joint reactions
$$\text{shear:compression} = \frac{\sqrt{x^2 + y^2}}{|z|}$$
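As a minimal illustration (not an original cell of this notebook), the ratio can be computed directly from the three glenohumeral force components expressed in the glenoid frame:
```
import numpy as np

def shear_compression_ratio(fx, fy, fz):
    # resultant shear in the glenoid plane divided by compression along the glenoid normal
    return np.sqrt(fx ** 2 + fy ** 2) / np.abs(fz)

# e.g. fx=40 N, fy=30 N, fz=-100 N gives 0.5, below the 0.56 dislocation threshold used below
shear_compression_ratio(40.0, 30.0, -100.0)
```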
```
from pathlib import Path
import numpy as np
import pandas as pd
import altair as alt
from pyosim import Conf
from pyomeca import Analogs3d
import spm1d
import matplotlib.pyplot as plt
from src.util import (
condition_counter,
random_balanced_design,
get_spm_cluster,
)
%load_ext autoreload
%autoreload 2
%load_ext lab_black
alt.data_transformers.enable("json")
# to make this notebook's output stable across runs
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
project_path = Path("/media/romain/E/Projet_ExpertsNovices/opensim")
conf = Conf(project_path=project_path)
conf.check_confs()
```
## Reading files
```
def parse_conditions(d):
return (
d.assign(
mass=lambda x: x["filename"]
.str.contains("r08")
.replace({True: 8, False: 12})
)
.merge(
pd.read_csv(project_path / "_conf.csv")[["participant", "group"]].rename(
columns={"group": "men"}
),
on="participant",
how="left",
)
.assign(height=1)
)
suffix = "_JointReaction_ReactionLoads"
threshold = 0.56 # dislocation ratio (Dickerson et al.)
d = (
pd.concat(
[
Analogs3d.from_sto(
ifile, na_values=[" nan", " -nan"]
)
.time_normalization()
.to_dataframe()
.assign(
participant=ifile.parts[-3],
filename=lambda x: ifile.stem + ifile.parts[-3] + f"{i}",
)
.reset_index()
for i, ifile in enumerate(
conf.project_path.glob(f"*/5_joint_reaction_force/*{suffix}*")
)
]
)
.pipe(parse_conditions)
.eval(
"sc_ratio = sqrt(GHJ_on_humerus_in_glenoid_fx ** 2 + GHJ_on_humerus_in_glenoid_fy ** 2) / GHJ_on_humerus_in_glenoid_fz.abs()"
)
.assign(
index=lambda x: x["index"] / 100, ratio_sup=lambda x: x["sc_ratio"] > threshold
)
)
d = d.drop(d.filter(like="GH", axis=1), axis=1)
d.head()
```
## Clean data
### Balance dataset
By randomly keeping, for each condition, only as many trials as the smallest condition contains
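The helper `random_balanced_design` is imported from `src.util`, which is not shown here; the sketch below is only an assumption about what it does (hypothetical name `random_balanced_design_sketch`), not the actual implementation.
```
# Illustrative sketch only; the real helper lives in src.util.
def random_balanced_design_sketch(df, cols, random_state=None):
    trials = df.drop_duplicates("filename")      # one row per trial
    n_min = trials.groupby(cols).size().min()    # size of the smallest condition
    return (
        trials.groupby(cols, group_keys=False)
        .apply(lambda g: g.sample(n=n_min, random_state=random_state))
    )
```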
```
d.drop_duplicates(["filename"]).groupby(["men", "mass"]).size()
balanced_trials = random_balanced_design(d, ["men", "mass"], random_state=RANDOM_SEED)[
"filename"
].to_list()
d = d.query("filename == @balanced_trials")
d.drop_duplicates(["filename"]).groupby(["men", "mass"]).size()
```
## Time above dislocation ratio
```
sup_ratio = (
d.groupby(["filename", "men", "mass"])["ratio_sup"]
.apply(lambda x: x.sum() / x.shape[0])
.reset_index()
)
sup_ratio.sample(5)
sup_ratio.shape
men_scale = alt.Scale(scheme="set1")
alt.Chart(sup_ratio).mark_boxplot().encode(
alt.X("men:N", axis=alt.Axis(labels=False, ticks=False, domain=False), title=None),
alt.Y("ratio_sup", axis=alt.Axis(format="%"), title="c:s > 0.56 (% temps total)"),
alt.Color("men:N", scale=men_scale),
).facet(column="mass")
import spm1d
alpha = 0.05
n_iter = 10_000
spm = spm1d.stats.nonparam.anova2(
y=sup_ratio["ratio_sup"], A=sup_ratio["men"], B=sup_ratio["mass"]
)
spmi = spm.inference(alpha=alpha, iterations=n_iter)
spmi
for ispmi in spmi:
if ispmi.h0reject:
print(ispmi.effect)
def cohend(a, b):
d = (a.mean() - b.mean()) / (np.sqrt((a.std() ** 2 + b.std() ** 2) / 2))
if np.abs(d) >= 0.8:
effect = "large"
elif np.abs(d) >= 0.5:
effect = "medium"
elif np.abs(d) >= 0.2:
effect = "small"
else:
effect = "no"
return d, effect
a, b = [i.values for _, i in sup_ratio.groupby("mass")["ratio_sup"]]
print(a.mean() - b.mean())
cohend(a, b)
sup_ratio.query("men == 0")["ratio_sup"].mean() - sup_ratio.query("men == 1")[
"ratio_sup"
].mean()
```
<!--NAVIGATION-->
< [Muscle activations & muscles forces](03.03-muscle-activations-forces.ipynb) | [Contents](Index.ipynb) |
|
github_jupyter
|
from pathlib import Path
import numpy as np
import pandas as pd
import altair as alt
from pyosim import Conf
from pyomeca import Analogs3d
import spm1d
import matplotlib.pyplot as plt
from src.util import (
condition_counter,
random_balanced_design,
get_spm_cluster,
)
%load_ext autoreload
%autoreload 2
%load_ext lab_black
alt.data_transformers.enable("json")
# to make this notebook's output stable across runs
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
project_path = Path("/media/romain/E/Projet_ExpertsNovices/opensim")
conf = Conf(project_path=project_path)
conf.check_confs()
def parse_conditions(d):
return (
d.assign(
mass=lambda x: x["filename"]
.str.contains("r08")
.replace({True: 8, False: 12})
)
.merge(
pd.read_csv(project_path / "_conf.csv")[["participant", "group"]].rename(
columns={"group": "men"}
),
on="participant",
how="left",
)
.assign(height=1)
)
suffix = "_JointReaction_ReactionLoads"
threshold = 0.56 # dislocation ratio (Dickerson et al.)
d = (
pd.concat(
[
Analogs3d.from_sto(
ifile, na_values=[" nan", " -nan"]
)
.time_normalization()
.to_dataframe()
.assign(
participant=ifile.parts[-3],
filename=lambda x: ifile.stem + ifile.parts[-3] + f"{i}",
)
.reset_index()
for i, ifile in enumerate(
conf.project_path.glob(f"*/5_joint_reaction_force/*{suffix}*")
)
]
)
.pipe(parse_conditions)
.eval(
"sc_ratio = sqrt(GHJ_on_humerus_in_glenoid_fx ** 2 + GHJ_on_humerus_in_glenoid_fy ** 2) / GHJ_on_humerus_in_glenoid_fz.abs()"
)
.assign(
index=lambda x: x["index"] / 100, ratio_sup=lambda x: x["sc_ratio"] > threshold
)
)
d = d.drop(d.filter(like="GH", axis=1), axis=1)
d.head()
d.drop_duplicates(["filename"]).groupby(["men", "mass"]).size()
balanced_trials = random_balanced_design(d, ["men", "mass"], random_state=RANDOM_SEED)[
"filename"
].to_list()
d = d.query("filename == @balanced_trials")
d.drop_duplicates(["filename"]).groupby(["men", "mass"]).size()
sup_ratio = (
d.groupby(["filename", "men", "mass"])["ratio_sup"]
.apply(lambda x: x.sum() / x.shape[0])
.reset_index()
)
sup_ratio.sample(5)
sup_ratio.shape
men_scale = alt.Scale(scheme="set1")
alt.Chart(sup_ratio).mark_boxplot().encode(
alt.X("men:N", axis=alt.Axis(labels=False, ticks=False, domain=False), title=None),
alt.Y("ratio_sup", axis=alt.Axis(format="%"), title="c:s > 0.56 (% temps total)"),
alt.Color("men:N", scale=men_scale),
).facet(column="mass")
import spm1d
alpha = 0.05
n_iter = 10_000
spm = spm1d.stats.nonparam.anova2(
y=sup_ratio["ratio_sup"], A=sup_ratio["men"], B=sup_ratio["mass"]
)
spmi = spm.inference(alpha=alpha, iterations=n_iter)
spmi
for ispmi in spmi:
if ispmi.h0reject:
print(ispmi.effect)
def cohend(a, b):
d = (a.mean() - b.mean()) / (np.sqrt((a.std() ** 2 + b.std() ** 2) / 2))
if np.abs(d) >= 0.8:
effect = "large"
elif np.abs(d) >= 0.5:
effect = "medium"
elif np.abs(d) >= 0.2:
effect = "small"
else:
effect = "no"
return d, effect
a, b = [i.values for _, i in sup_ratio.groupby("mass")["ratio_sup"]]
print(a.mean() - b.mean())
cohend(a, b)
sup_ratio.query("men == 0")["ratio_sup"].mean() - sup_ratio.query("men == 1")[
"ratio_sup"
].mean()
| 0.613237 | 0.907517 |
```
import pandas as pd
import tensorflow as tf
from official.nlp import optimization as nlp_opt
from official.nlp.bert import tokenization as bert_token
from berts.berts import BertEQAModel
from berts.utils import get_bert_inputs
bert_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2"
model, bert_layer = BertEQAModel(
bert_url,
return_cls=True
)
model.summary()
# load vocabulary (must be same as pre-trained bert)
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
to_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
bert_tokenizer = bert_token.FullTokenizer(vocab_file, to_lower_case)
print('vocabulary size:', len(bert_tokenizer.vocab))
# use preprocessed SQuAD2.0 partial data
df_ans = pd.read_csv('data/SQuAD/convert_ans_pos/train_answer_pos.csv', na_filter= False)
df_ans = df_ans.drop(columns=['aid'])
df_ans.head()
df_ques = pd.read_csv('data/SQuAD/preprocess/train_question_tokenized.csv', na_filter= False)
df_ques.head()
df_context = pd.read_csv('data/SQuAD/preprocess/train_context_tokenized.csv', na_filter= False)
df_context.head()
# join context, question, answer data together
df_qa = df_ans.join(df_ques.set_index(['cid', 'qid']), how='inner', on=['cid', 'qid']
).join(df_context.set_index('cid'), how='inner', on='cid')
df_qa.head()
df_qa['question'] = [s.split(" ") for s in df_qa['question']]
df_qa['context'] = [s.split(" ") for s in df_qa['context']]
print('original data size:', len(df_qa))
# only use a partial data set (context + question < 300 tokens), b/c my GPU memory is not big enough for all data
df_qa = df_qa[df_qa.apply(lambda x: (len(x['question']) + len(x['context'])) < 300, axis=1)]
print('using data size:', len(df_qa))
# shuffle data
df_qa = df_qa.sample(frac=1).reset_index(drop=True)
# separate data set into training (80%) and validation (20%) data sets
val_size = int(len(df_qa) * 0.2)
# prepare bert input data
input_words, input_mask, input_seg = get_bert_inputs(bert_tokenizer,
df_qa['question'],
df_qa['context'],
tokenized=True)
question_len = [(len(s) + 1) * df_qa['hasAnswer'][i] for i, s in enumerate(df_qa['question'])] # + 1: <SEP>
train_input_words, train_input_mask, train_input_seg = input_words[:-val_size], input_mask[:-val_size], input_seg[:-val_size]
train_label_cls = tf.constant(df_qa['hasAnswer'][:-val_size])
train_label_start = tf.constant(df_qa['ans_start'][:-val_size]) + question_len[:-val_size]
train_label_end = tf.constant(df_qa['ans_end'][:-val_size]) + question_len[:-val_size]
print('training data shape:', train_input_words.shape, train_input_mask.shape, train_input_seg.shape)
print('training label shape:', train_label_cls.shape, train_label_start.shape, train_label_end.shape)
valid_input_words, valid_input_mask, valid_input_seg = input_words[-val_size:], input_mask[-val_size:], input_seg[-val_size:]
valid_label_cls = tf.constant(df_qa['hasAnswer'][-val_size:])
valid_label_start = tf.constant(df_qa['ans_start'][-val_size:]) + question_len[-val_size:]
valid_label_end = tf.constant(df_qa['ans_end'][-val_size:]) + question_len[-val_size:]
print('validation data shape:', valid_input_words.shape, valid_input_mask.shape, valid_input_seg.shape)
print('validation label shape:', valid_label_cls.shape, valid_label_start.shape, valid_label_end.shape)
def qa_loss(y_true, y_pred):
loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred, from_logits=False)
mask = tf.cast(tf.math.logical_not(tf.math.equal(y_true, 0)), loss.dtype) # y_true==0: no answer
# loss.shape: (None,), mask.shape: (None, 1) -> squeeze
loss *= tf.squeeze(mask, axis=1)
return tf.math.reduce_sum(loss)/(tf.math.reduce_sum(mask) + 1e-7)
def qa_accuracy(y_true, y_pred):
if y_pred.shape[1] == 1:
return tf.math.reduce_mean(tf.keras.metrics.binary_accuracy(y_true, y_pred))
acc = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
mask = tf.cast(tf.math.logical_not(tf.math.equal(y_true, 0)), acc.dtype) # y_true==0: no answer
acc *= tf.squeeze(mask, axis=1)
return tf.math.reduce_sum(acc)/(tf.math.reduce_sum(mask) + 1e-7)
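# Added note (not in the original notebook): the mask above zeroes out examples whose
# label is 0, i.e. questions with no answer, so they do not contribute to the span
# loss/accuracy. For example y_true = [[0], [12], [7]] gives mask = [[0.], [1.], [1.]];
# the 1e-7 term avoids division by zero when a batch contains only unanswerable questions.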
batch_size = 8 # b/c my GPU memory is not big enough for bigger batch size
epochs = 3
train_data_size = len(train_label_cls)
steps_per_epoch = int(train_data_size / batch_size)
num_train_steps = steps_per_epoch * epochs
warmup_steps = int(epochs * train_data_size * 0.1 / batch_size)
# creates an optimizer with learning rate schedule
optimizer = nlp_opt.create_optimizer(2e-5, num_train_steps=num_train_steps, num_warmup_steps=warmup_steps)
model.compile(optimizer=optimizer,
loss=[qa_loss, qa_loss, 'binary_crossentropy'],
loss_weights=[1.4, 1, 0.6],
metrics=qa_accuracy)
model.fit([train_input_words, train_input_mask, train_input_seg],
[train_label_start, train_label_end, train_label_cls],
validation_data=([valid_input_words, valid_input_mask, valid_input_seg],
[valid_label_start, valid_label_end, valid_label_cls]),
batch_size=batch_size, epochs=epochs)
def convert_tokens_to_sentence(tokens):
sentence = ''
for i, tok in enumerate(tokens):
if i == 0 or tok.startswith('##'):
sentence += tok.lstrip('#')
else:
sentence += (' ' + tok)
return sentence
def get_validation_prediction(model, tokenizer, idx):
ds, de, cls = model.predict([valid_input_words[idx:idx+1], valid_input_mask[idx:idx+1], valid_input_seg[idx:idx+1]])
ds, de, cls = tf.math.argmax(ds[0]).numpy(), tf.math.argmax(de[0]).numpy(), cls[0][0]
answer = ''
if cls > 0.5:
answer = convert_tokens_to_sentence(tokenizer.convert_ids_to_tokens(valid_input_words[idx][ds:de+1].numpy()))
return cls, ds, de, answer
def output_cls(cls):
if cls > 0.5:
return 'has answer'
return 'no answer'
def output_result(model, tokenizer, idx):
cls, ds, de, answer = get_validation_prediction(model, tokenizer, idx)
print("context:")
print(convert_tokens_to_sentence(df_qa['context'][len(df_qa) - val_size + idx]))
print("question:")
print(convert_tokens_to_sentence(df_qa['question'][len(df_qa) - val_size + idx]))
print("validation data index '%d' prediction:" % (idx))
print("\tcls(%f): %s, ds(%d), de(%d), answer: %s" % (cls, output_cls(cls), ds, de, answer))
print("ground true data index '%d':" % (idx))
print("\tcls(%d): %s, ds(%d), de(%d), answer: %s" %
(valid_label_cls[idx].numpy(), output_cls(valid_label_cls[idx].numpy()),
valid_label_start[idx].numpy(),
valid_label_end[idx].numpy(),
convert_tokens_to_sentence(df_qa['answer'][len(df_qa) - val_size + idx].split(" "))))
output_result(model, bert_tokenizer, 1000)
output_result(model, bert_tokenizer, 1100)
output_result(model, bert_tokenizer, 1110)
```
|
github_jupyter
|
import pandas as pd
import tensorflow as tf
from official.nlp import optimization as nlp_opt
from official.nlp.bert import tokenization as bert_token
from berts.berts import BertEQAModel
from berts.utils import get_bert_inputs
bert_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2"
model, bert_layer = BertEQAModel(
bert_url,
return_cls=True
)
model.summary()
# load vocabulary (must be same as pre-trained bert)
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
to_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
bert_tokenizer = bert_token.FullTokenizer(vocab_file, to_lower_case)
print('vocabulary size:', len(bert_tokenizer.vocab))
# use preprocessed SQuAD2.0 partial data
df_ans = pd.read_csv('data/SQuAD/convert_ans_pos/train_answer_pos.csv', na_filter= False)
df_ans = df_ans.drop(columns=['aid'])
df_ans.head()
df_ques = pd.read_csv('data/SQuAD/preprocess/train_question_tokenized.csv', na_filter= False)
df_ques.head()
df_context = pd.read_csv('data/SQuAD/preprocess/train_context_tokenized.csv', na_filter= False)
df_context.head()
# join context, question, answer data together
df_qa = df_ans.join(df_ques.set_index(['cid', 'qid']), how='inner', on=['cid', 'qid']
).join(df_context.set_index('cid'), how='inner', on='cid')
df_qa.head()
df_qa['question'] = [s.split(" ") for s in df_qa['question']]
df_qa['context'] = [s.split(" ") for s in df_qa['context']]
print('original data size:', len(df_qa))
# only use a partial data set (context + question < 300 tokens), b/c my GPU memory is not big enough for all data
df_qa = df_qa[df_qa.apply(lambda x: (len(x['question']) + len(x['context'])) < 300, axis=1)]
print('using data size:', len(df_qa))
# shuffle data
df_qa = df_qa.sample(frac=1).reset_index(drop=True)
# separate data set into training (80%) and validation (20%) data sets
val_size = int(len(df_qa) * 0.2)
# prepare bert input data
input_words, input_mask, input_seg = get_bert_inputs(bert_tokenizer,
df_qa['question'],
df_qa['context'],
tokenized=True)
question_len = [(len(s) + 1) * df_qa['hasAnswer'][i] for i, s in enumerate(df_qa['question'])] # + 1: <SEP>
train_input_words, train_input_mask, train_input_seg = input_words[:-val_size], input_mask[:-val_size], input_seg[:-val_size]
train_label_cls = tf.constant(df_qa['hasAnswer'][:-val_size])
train_label_start = tf.constant(df_qa['ans_start'][:-val_size]) + question_len[:-val_size]
train_label_end = tf.constant(df_qa['ans_end'][:-val_size]) + question_len[:-val_size]
print('training data shape:', train_input_words.shape, train_input_mask.shape, train_input_seg.shape)
print('training label shape:', train_label_cls.shape, train_label_start.shape, train_label_end.shape)
valid_input_words, valid_input_mask, valid_input_seg = input_words[-val_size:], input_mask[-val_size:], input_seg[-val_size:]
valid_label_cls = tf.constant(df_qa['hasAnswer'][-val_size:])
valid_label_start = tf.constant(df_qa['ans_start'][-val_size:]) + question_len[-val_size:]
valid_label_end = tf.constant(df_qa['ans_end'][-val_size:]) + question_len[-val_size:]
print('validation data shape:', valid_input_words.shape, valid_input_mask.shape, valid_input_seg.shape)
print('validation label shape:', valid_label_cls.shape, valid_label_start.shape, valid_label_end.shape)
def qa_loss(y_true, y_pred):
loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred, from_logits=False)
mask = tf.cast(tf.math.logical_not(tf.math.equal(y_true, 0)), loss.dtype) # y_true==0: no answer
# loss.shape: (None,), mask.shape: (None, 1) -> squeeze
loss *= tf.squeeze(mask, axis=1)
return tf.math.reduce_sum(loss)/(tf.math.reduce_sum(mask) + 1e-7)
def qa_accuracy(y_true, y_pred):
if y_pred.shape[1] == 1:
return tf.math.reduce_mean(tf.keras.metrics.binary_accuracy(y_true, y_pred))
acc = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
mask = tf.cast(tf.math.logical_not(tf.math.equal(y_true, 0)), acc.dtype) # y_true==0: no answer
acc *= tf.squeeze(mask, axis=1)
return tf.math.reduce_sum(acc)/(tf.math.reduce_sum(mask) + 1e-7)
batch_size = 8 # b/c my GPU memory is not big enough for bigger batch size
epochs = 3
train_data_size = len(train_label_cls)
steps_per_epoch = int(train_data_size / batch_size)
num_train_steps = steps_per_epoch * epochs
warmup_steps = int(epochs * train_data_size * 0.1 / batch_size)
# creates an optimizer with learning rate schedule
optimizer = nlp_opt.create_optimizer(2e-5, num_train_steps=num_train_steps, num_warmup_steps=warmup_steps)
model.compile(optimizer=optimizer,
loss=[qa_loss, qa_loss, 'binary_crossentropy'],
loss_weights=[1.4, 1, 0.6],
metrics=qa_accuracy)
model.fit([train_input_words, train_input_mask, train_input_seg],
[train_label_start, train_label_end, train_label_cls],
validation_data=([valid_input_words, valid_input_mask, valid_input_seg],
[valid_label_start, valid_label_end, valid_label_cls]),
batch_size=batch_size, epochs=epochs)
def convert_tokens_to_sentence(tokens):
sentence = ''
for i, tok in enumerate(tokens):
if i == 0 or tok.startswith('##'):
sentence += tok.lstrip('#')
else:
sentence += (' ' + tok)
return sentence
def get_validation_prediction(model, tokenizer, idx):
ds, de, cls = model.predict([valid_input_words[idx:idx+1], valid_input_mask[idx:idx+1], valid_input_seg[idx:idx+1]])
ds, de, cls = tf.math.argmax(ds[0]).numpy(), tf.math.argmax(de[0]).numpy(), cls[0][0]
answer = ''
if cls > 0.5:
answer = convert_tokens_to_sentence(tokenizer.convert_ids_to_tokens(valid_input_words[idx][ds:de+1].numpy()))
return cls, ds, de, answer
def output_cls(cls):
if cls > 0.5:
return 'has answer'
return 'no answer'
def output_result(model, tokenizer, idx):
cls, ds, de, answer = get_validation_prediction(model, tokenizer, idx)
print("context:")
print(convert_tokens_to_sentence(df_qa['context'][len(df_qa) - val_size + idx]))
print("question:")
print(convert_tokens_to_sentence(df_qa['question'][len(df_qa) - val_size + idx]))
print("validation data index '%d' prediction:" % (idx))
print("\tcls(%f): %s, ds(%d), de(%d), answer: %s" % (cls, output_cls(cls), ds, de, answer))
print("ground true data index '%d':" % (idx))
print("\tcls(%d): %s, ds(%d), de(%d), answer: %s" %
(valid_label_cls[idx].numpy(), output_cls(valid_label_cls[idx].numpy()),
valid_label_start[idx].numpy(),
valid_label_end[idx].numpy(),
convert_tokens_to_sentence(df_qa['answer'][len(df_qa) - val_size + idx].split(" "))))
output_result(model, bert_tokenizer, 1000)
output_result(model, bert_tokenizer, 1100)
output_result(model, bert_tokenizer, 1110)
| 0.730578 | 0.326271 |
```
#default_exp data.transforms
#export
from fastai.torch_basics import *
from fastai.data.core import *
from fastai.data.load import *
from fastai.data.external import *
from sklearn.model_selection import train_test_split
#hide
from nbdev.showdoc import *
```
# Helper functions for processing data and basic transforms
> Functions for getting, splitting, and labeling data, as well as generic transforms
## Get, split, and label
For most data source creation we need functions to get a list of items, split them into train/valid sets, and label them. fastai provides functions to make each of these steps easy (especially when combined with `fastai.data.blocks`).
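As a quick preview (not an original cell of this notebook), the three steps fit together as sketched below, using helpers that are all defined further down on this page:
```
# 1. get a list of items, 2. split them into train/valid, 3. label each item
items  = get_image_files(path)             # get: image file names under `path`
splits = GrandparentSplitter()(items)      # split: (train idxs, valid idxs) by folder name
labels = [parent_label(o) for o in items]  # label: class read from the parent folder
```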
### Get
First we'll look at functions that *get* a list of items (generally file names).
We'll use *tiny MNIST* (a subset of MNIST with just two classes, `7`s and `3`s) for our examples/tests throughout this page.
```
path = untar_data(URLs.MNIST_TINY)
(path/'train').ls()
# export
def _get_files(p, fs, extensions=None):
p = Path(p)
res = [p/f for f in fs if not f.startswith('.')
and ((not extensions) or f'.{f.split(".")[-1].lower()}' in extensions)]
return res
# export
def get_files(path, extensions=None, recurse=True, folders=None, followlinks=True):
"Get all the files in `path` with optional `extensions`, optionally with `recurse`, only in `folders`, if specified."
path = Path(path)
folders=L(folders)
extensions = setify(extensions)
extensions = {e.lower() for e in extensions}
if recurse:
res = []
for i,(p,d,f) in enumerate(os.walk(path, followlinks=followlinks)): # returns (dirpath, dirnames, filenames)
if len(folders) !=0 and i==0: d[:] = [o for o in d if o in folders]
else: d[:] = [o for o in d if not o.startswith('.')]
if len(folders) !=0 and i==0 and '.' not in folders: continue
res += _get_files(p, f, extensions)
else:
f = [o.name for o in os.scandir(path) if o.is_file()]
res = _get_files(path, f, extensions)
return L(res)
```
This is the most general way to grab a bunch of file names from disk. If you pass `extensions` (including the `.`) then returned file names are filtered by that list. Only those files directly in `path` are included, unless you pass `recurse`, in which case all child folders are also searched recursively. `folders` is an optional list of directories to limit the search to.
```
t3 = get_files(path/'train'/'3', extensions='.png', recurse=False)
t7 = get_files(path/'train'/'7', extensions='.png', recurse=False)
t = get_files(path/'train', extensions='.png', recurse=True)
test_eq(len(t), len(t3)+len(t7))
test_eq(len(get_files(path/'train'/'3', extensions='.jpg', recurse=False)),0)
test_eq(len(t), len(get_files(path, extensions='.png', recurse=True, folders='train')))
t
#hide
test_eq(len(get_files(path/'train'/'3', recurse=False)),346)
test_eq(len(get_files(path, extensions='.png', recurse=True, folders=['train', 'test'])),729)
test_eq(len(get_files(path, extensions='.png', recurse=True, folders='train')),709)
test_eq(len(get_files(path, extensions='.png', recurse=True, folders='training')),0)
```
It's often useful to be able to create functions with customized behavior. `fastai.data` generally uses functions named as CamelCase verbs ending in `er` to create these functions. `FileGetter` is a simple example of such a function creator.
```
#export
def FileGetter(suf='', extensions=None, recurse=True, folders=None):
"Create `get_files` partial function that searches path suffix `suf`, only in `folders`, if specified, and passes along args"
def _inner(o, extensions=extensions, recurse=recurse, folders=folders):
return get_files(o/suf, extensions, recurse, folders)
return _inner
fpng = FileGetter(extensions='.png', recurse=False)
test_eq(len(t7), len(fpng(path/'train'/'7')))
test_eq(len(t), len(fpng(path/'train', recurse=True)))
fpng_r = FileGetter(extensions='.png', recurse=True)
test_eq(len(t), len(fpng_r(path/'train')))
#export
image_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))
#export
def get_image_files(path, recurse=True, folders=None):
"Get image files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=image_extensions, recurse=recurse, folders=folders)
```
This is simply `get_files` called with a list of standard image extensions.
```
test_eq(len(t), len(get_image_files(path, recurse=True, folders='train')))
#export
def ImageGetter(suf='', recurse=True, folders=None):
"Create `get_image_files` partial function that searches path suffix `suf` and passes along `kwargs`, only in `folders`, if specified."
def _inner(o, recurse=recurse, folders=folders): return get_image_files(o/suf, recurse, folders)
return _inner
```
Same as `FileGetter`, but for image extensions.
```
test_eq(len(get_files(path/'train', extensions='.png', recurse=True, folders='3')),
len(ImageGetter( 'train', recurse=True, folders='3')(path)))
#export
def get_text_files(path, recurse=True, folders=None):
"Get text files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=['.txt'], recurse=recurse, folders=folders)
#export
class ItemGetter(ItemTransform):
"Creates a proper transform that applies `itemgetter(i)` (even on a tuple)"
_retain = False
def __init__(self, i): self.i = i
def encodes(self, x): return x[self.i]
test_eq(ItemGetter(1)((1,2,3)), 2)
test_eq(ItemGetter(1)(L(1,2,3)), 2)
test_eq(ItemGetter(1)([1,2,3]), 2)
test_eq(ItemGetter(1)(np.array([1,2,3])), 2)
#export
class AttrGetter(ItemTransform):
"Creates a proper transform that applies `attrgetter(nm)` (even on a tuple)"
_retain = False
def __init__(self, nm, default=None): store_attr()
def encodes(self, x): return getattr(x, self.nm, self.default)
test_eq(AttrGetter('shape')(torch.randn([4,5])), [4,5])
test_eq(AttrGetter('shape', [0])([4,5]), [0])
```
### Split
The next set of functions is used to *split* data into training and validation sets. Each splitter returns two lists: one of indices or masks for the training set and one for the validation set.
```
# export
def RandomSplitter(valid_pct=0.2, seed=None):
"Create function that splits `items` between train/val with `valid_pct` randomly."
def _inner(o):
if seed is not None: torch.manual_seed(seed)
rand_idx = L(int(i) for i in torch.randperm(len(o)))
cut = int(valid_pct * len(o))
return rand_idx[cut:],rand_idx[:cut]
return _inner
src = list(range(30))
f = RandomSplitter(seed=42)
trn,val = f(src)
assert 0<len(trn)<len(src)
assert all(o not in val for o in trn)
test_eq(len(trn), len(src)-len(val))
# test random seed consistency
test_eq(f(src)[0], trn)
```
The next splitter uses scikit-learn's `train_test_split`. This allows *splitting* items in a stratified fashion (uniformly according to the `labels` distribution).
```
# export
def TrainTestSplitter(test_size=0.2, random_state=None, stratify=None, train_size=None, shuffle=True):
"Split `items` into random train and test subsets using sklearn train_test_split utility."
def _inner(o, **kwargs):
train, valid = train_test_split(range(len(o)), test_size=test_size, random_state=random_state, stratify=stratify, train_size=train_size, shuffle=shuffle)
return L(train), L(valid)
return _inner
src = list(range(30))
labels = [0] * 20 + [1] * 10
test_size = 0.2
f = TrainTestSplitter(test_size=test_size, random_state=42, stratify=labels)
trn,val = f(src)
assert 0<len(trn)<len(src)
assert all(o not in val for o in trn)
test_eq(len(trn), len(src)-len(val))
# test random seed consistency
test_eq(f(src)[0], trn)
# test labels distribution consistency
# there should be test_size % of zeroes and ones respectively in the validation set
test_eq(len([t for t in val if t < 20]) / 20, test_size)
test_eq(len([t for t in val if t >= 20]) / 10, test_size)
#export
def IndexSplitter(valid_idx):
"Split `items` so that `val_idx` are in the validation set and the others in the training set"
def _inner(o):
train_idx = np.setdiff1d(np.array(range_of(o)), np.array(valid_idx))
return L(train_idx, use_list=True), L(valid_idx, use_list=True)
return _inner
items = list(range(10))
splitter = IndexSplitter([3,7,9])
test_eq(splitter(items),[[0,1,2,4,5,6,8],[3,7,9]])
# export
def _grandparent_idxs(items, name):
def _inner(items, name): return mask2idxs(Path(o).parent.parent.name == name for o in items)
return [i for n in L(name) for i in _inner(items,n)]
# export
def GrandparentSplitter(train_name='train', valid_name='valid'):
"Split `items` from the grand parent folder names (`train_name` and `valid_name`)."
def _inner(o):
return _grandparent_idxs(o, train_name),_grandparent_idxs(o, valid_name)
return _inner
fnames = [path/'train/3/9932.png', path/'valid/7/7189.png',
path/'valid/7/7320.png', path/'train/7/9833.png',
path/'train/3/7666.png', path/'valid/3/925.png',
path/'train/7/724.png', path/'valid/3/93055.png']
splitter = GrandparentSplitter()
test_eq(splitter(fnames),[[0,3,4,6],[1,2,5,7]])
fnames2 = fnames + [path/'test/3/4256.png', path/'test/7/2345.png', path/'valid/7/6467.png']
splitter = GrandparentSplitter(train_name=('train', 'valid'), valid_name='test')
test_eq(splitter(fnames2),[[0,3,4,6,1,2,5,7,10],[8,9]])
# export
def FuncSplitter(func):
"Split `items` by result of `func` (`True` for validation, `False` for training set)."
def _inner(o):
val_idx = mask2idxs(func(o_) for o_ in o)
return IndexSplitter(val_idx)(o)
return _inner
splitter = FuncSplitter(lambda o: Path(o).parent.parent.name == 'valid')
test_eq(splitter(fnames),[[0,3,4,6],[1,2,5,7]])
# export
def MaskSplitter(mask):
"Split `items` depending on the value of `mask`."
def _inner(o): return IndexSplitter(mask2idxs(mask))(o)
return _inner
items = list(range(6))
splitter = MaskSplitter([True,False,False,True,False,True])
test_eq(splitter(items),[[1,2,4],[0,3,5]])
# export
def FileSplitter(fname):
"Split `items` by providing file `fname` (contains names of valid items separated by newline)."
valid = Path(fname).read().split('\n')
def _func(x): return x.name in valid
def _inner(o): return FuncSplitter(_func)(o)
return _inner
with tempfile.TemporaryDirectory() as d:
fname = Path(d)/'valid.txt'
fname.write('\n'.join([Path(fnames[i]).name for i in [1,3,4]]))
splitter = FileSplitter(fname)
test_eq(splitter(fnames),[[0,2,5,6,7],[1,3,4]])
# export
def ColSplitter(col='is_valid'):
"Split `items` (supposed to be a dataframe) by value in `col`"
def _inner(o):
assert isinstance(o, pd.DataFrame), "ColSplitter only works when your items are a pandas DataFrame"
valid_idx = (o.iloc[:,col] if isinstance(col, int) else o[col]).values.astype('bool')
return IndexSplitter(mask2idxs(valid_idx))(o)
return _inner
df = pd.DataFrame({'a': [0,1,2,3,4], 'b': [True,False,True,True,False]})
splits = ColSplitter('b')(df)
test_eq(splits, [[1,4], [0,2,3]])
#Works with strings or index
splits = ColSplitter(1)(df)
test_eq(splits, [[1,4], [0,2,3]])
# does not get confused if the type of 'is_valid' is integer, but it is meant to be a yes/no flag
df = pd.DataFrame({'a': [0,1,2,3,4], 'is_valid': [1,0,1,1,0]})
splits_by_int = ColSplitter('is_valid')(df)
test_eq(splits_by_int, [[1,4], [0,2,3]])
# export
def RandomSubsetSplitter(train_sz, valid_sz, seed=None):
"Take randoms subsets of `splits` with `train_sz` and `valid_sz`"
assert 0 < train_sz < 1
assert 0 < valid_sz < 1
assert train_sz + valid_sz <= 1.
def _inner(o):
if seed is not None: torch.manual_seed(seed)
train_len,valid_len = int(len(o)*train_sz),int(len(o)*valid_sz)
idxs = L(int(i) for i in torch.randperm(len(o)))
return idxs[:train_len],idxs[train_len:train_len+valid_len]
return _inner
items = list(range(100))
valid_idx = list(np.arange(70,100))
splits = RandomSubsetSplitter(0.3, 0.1)(items)
test_eq(len(splits[0]), 30)
test_eq(len(splits[1]), 10)
```
### Label
The final set of functions is used to *label* a single item of data.
```
# export
def parent_label(o):
"Label `item` with the parent folder name."
return Path(o).parent.name
```
Note that `parent_label` doesn't have anything to customize, so it doesn't return a function - you can just use it directly.
```
test_eq(parent_label(fnames[0]), '3')
test_eq(parent_label("fastai_dev/dev/data/mnist_tiny/train/3/9932.png"), '3')
[parent_label(o) for o in fnames]
#hide
#test for MS Windows when os.path.sep is '\\' instead of '/'
test_eq(parent_label(os.path.join("fastai_dev","dev","data","mnist_tiny","train", "3", "9932.png") ), '3')
# export
class RegexLabeller():
"Label `item` with regex `pat`."
def __init__(self, pat, match=False):
self.pat = re.compile(pat)
self.matcher = self.pat.match if match else self.pat.search
def __call__(self, o):
res = self.matcher(str(o))
assert res,f'Failed to find "{self.pat}" in "{o}"'
return res.group(1)
```
`RegexLabeller` is a very flexible function since it handles any regex search of the stringified item. Pass `match=True` to use `re.match` (i.e. check only the start of the string); otherwise `re.search` is used (the default).
For instance, here's an example that replicates the previous `parent_label` results.
```
f = RegexLabeller(fr'{os.path.sep}(\d){os.path.sep}')
test_eq(f(fnames[0]), '3')
[f(o) for o in fnames]
f = RegexLabeller(r'(\d*)', match=True)
test_eq(f(fnames[0].name), '9932')
#export
class ColReader(DisplayedTransform):
"Read `cols` in `row` with potential `pref` and `suff`"
def __init__(self, cols, pref='', suff='', label_delim=None):
store_attr()
self.pref = str(pref) + os.path.sep if isinstance(pref, Path) else pref
self.cols = L(cols)
def _do_one(self, r, c):
o = r[c] if isinstance(c, int) else r[c] if c=='name' else getattr(r, c)
if len(self.pref)==0 and len(self.suff)==0 and self.label_delim is None: return o
if self.label_delim is None: return f'{self.pref}{o}{self.suff}'
else: return o.split(self.label_delim) if len(o)>0 else []
def __call__(self, o, **kwargs):
if len(self.cols) == 1: return self._do_one(o, self.cols[0])
return L(self._do_one(o, c) for c in self.cols)
```
`cols` can be a list of column names or a list of indices (or a mix of both). If `label_delim` is passed, the result is split using it.
```
df = pd.DataFrame({'a': 'a b c d'.split(), 'b': ['1 2', '0', '', '1 2 3']})
f = ColReader('a', pref='0', suff='1')
test_eq([f(o) for o in df.itertuples()], '0a1 0b1 0c1 0d1'.split())
f = ColReader('b', label_delim=' ')
test_eq([f(o) for o in df.itertuples()], [['1', '2'], ['0'], [], ['1', '2', '3']])
df['a1'] = df['a']
f = ColReader(['a', 'a1'], pref='0', suff='1')
test_eq([f(o) for o in df.itertuples()], [L('0a1', '0a1'), L('0b1', '0b1'), L('0c1', '0c1'), L('0d1', '0d1')])
df = pd.DataFrame({'a': [L(0,1), L(2,3,4), L(5,6,7)]})
f = ColReader('a')
test_eq([f(o) for o in df.itertuples()], [L(0,1), L(2,3,4), L(5,6,7)])
df['name'] = df['a']
f = ColReader('name')
test_eq([f(df.iloc[0,:])], [L(0,1)])
```
## Categorize -
```
#export
class CategoryMap(CollBase):
"Collection of categories with the reverse mapping in `o2i`"
def __init__(self, col, sort=True, add_na=False, strict=False):
if is_categorical_dtype(col):
items = L(col.cat.categories, use_list=True)
#Remove non-used categories while keeping order
if strict: items = L(o for o in items if o in col.unique())
else:
if not hasattr(col,'unique'): col = L(col, use_list=True)
# `o==o` is the generalized definition of non-NaN used by Pandas
items = L(o for o in col.unique() if o==o)
if sort: items = items.sorted()
self.items = '#na#' + items if add_na else items
self.o2i = defaultdict(int, self.items.val2idx()) if add_na else dict(self.items.val2idx())
def map_objs(self,objs):
"Map `objs` to IDs"
return L(self.o2i[o] for o in objs)
def map_ids(self,ids):
"Map `ids` to objects in vocab"
return L(self.items[o] for o in ids)
def __eq__(self,b): return all_equal(b,self)
t = CategoryMap([4,2,3,4])
test_eq(t, [2,3,4])
test_eq(t.o2i, {2:0,3:1,4:2})
test_eq(t.map_objs([2,3]), [0,1])
test_eq(t.map_ids([0,1]), [2,3])
test_fail(lambda: t.o2i['unseen label'])
t = CategoryMap([4,2,3,4], add_na=True)
test_eq(t, ['#na#',2,3,4])
test_eq(t.o2i, {'#na#':0,2:1,3:2,4:3})
t = CategoryMap(pd.Series([4,2,3,4]), sort=False)
test_eq(t, [4,2,3])
test_eq(t.o2i, {4:0,2:1,3:2})
col = pd.Series(pd.Categorical(['M','H','L','M'], categories=['H','M','L'], ordered=True))
t = CategoryMap(col)
test_eq(t, ['H','M','L'])
test_eq(t.o2i, {'H':0,'M':1,'L':2})
col = pd.Series(pd.Categorical(['M','H','M'], categories=['H','M','L'], ordered=True))
t = CategoryMap(col, strict=True)
test_eq(t, ['H','M'])
test_eq(t.o2i, {'H':0,'M':1})
# export
class Categorize(DisplayedTransform):
"Reversible transform of category string to `vocab` id"
loss_func,order=CrossEntropyLossFlat(),1
def __init__(self, vocab=None, sort=True, add_na=False):
if vocab is not None: vocab = CategoryMap(vocab, sort=sort, add_na=add_na)
store_attr()
def setups(self, dsets):
if self.vocab is None and dsets is not None: self.vocab = CategoryMap(dsets, sort=self.sort, add_na=self.add_na)
self.c = len(self.vocab)
def encodes(self, o): return TensorCategory(self.vocab.o2i[o])
def decodes(self, o): return Category (self.vocab [o])
#export
class Category(str, ShowTitle): _show_args = {'label': 'category'}
cat = Categorize()
tds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])
test_eq(cat.vocab, ['cat', 'dog'])
test_eq(cat('cat'), 0)
test_eq(cat.decode(1), 'dog')
test_stdout(lambda: show_at(tds,2), 'cat')
cat = Categorize(add_na=True)
tds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])
test_eq(cat.vocab, ['#na#', 'cat', 'dog'])
test_eq(cat('cat'), 1)
test_eq(cat.decode(2), 'dog')
test_stdout(lambda: show_at(tds,2), 'cat')
cat = Categorize(vocab=['dog', 'cat'], sort=False, add_na=True)
tds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])
test_eq(cat.vocab, ['#na#', 'dog', 'cat'])
test_eq(cat('dog'), 1)
test_eq(cat.decode(2), 'cat')
test_stdout(lambda: show_at(tds,2), 'cat')
```
## Multicategorize -
```
# export
class MultiCategorize(Categorize):
"Reversible transform of multi-category strings to `vocab` id"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab=None, add_na=False): super().__init__(vocab=vocab,add_na=add_na)
def setups(self, dsets):
if not dsets: return
if self.vocab is None:
vals = set()
for b in dsets: vals = vals.union(set(b))
self.vocab = CategoryMap(list(vals), add_na=self.add_na)
def encodes(self, o): return TensorMultiCategory([self.vocab.o2i[o_] for o_ in o])
def decodes(self, o): return MultiCategory ([self.vocab [o_] for o_ in o])
#export
class MultiCategory(L):
def show(self, ctx=None, sep=';', color='black', **kwargs):
return show_title(sep.join(self.map(str)), ctx=ctx, color=color, **kwargs)
cat = MultiCategorize()
tds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], tfms=[cat])
test_eq(tds[3][0], TensorMultiCategory([]))
test_eq(cat.vocab, ['a', 'b', 'c'])
test_eq(cat(['a', 'c']), tensor([0,2]))
test_eq(cat([]), tensor([]))
test_eq(cat.decode([1]), ['b'])
test_eq(cat.decode([0,2]), ['a', 'c'])
test_stdout(lambda: show_at(tds,2), 'a;c')
# export
class OneHotEncode(DisplayedTransform):
"One-hot encodes targets"
order=2
def __init__(self, c=None): store_attr()
def setups(self, dsets):
if self.c is None: self.c = len(L(getattr(dsets, 'vocab', None)))
if not self.c: warn("Couldn't infer the number of classes, please pass a value for `c` at init")
def encodes(self, o): return TensorMultiCategory(one_hot(o, self.c).float())
def decodes(self, o): return one_hot_decode(o, None)
```
Works in conjunction with `MultiCategorize`, or on its own if you have one-hot encoded targets (pass a `vocab` for decoding and `do_encode=False` in this case)
```
_tfm = OneHotEncode(c=3)
test_eq(_tfm([0,2]), tensor([1.,0,1]))
test_eq(_tfm.decode(tensor([0,1,1])), [1,2])
tds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], [[MultiCategorize(), OneHotEncode()]])
test_eq(tds[1], [tensor([1.,0,0])])
test_eq(tds[3], [tensor([0.,0,0])])
test_eq(tds.decode([tensor([False, True, True])]), [['b','c']])
test_eq(type(tds[1][0]), TensorMultiCategory)
test_stdout(lambda: show_at(tds,2), 'a;c')
#hide
#test with passing the vocab
tds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], [[MultiCategorize(vocab=['a', 'b', 'c']), OneHotEncode()]])
test_eq(tds[1], [tensor([1.,0,0])])
test_eq(tds[3], [tensor([0.,0,0])])
test_eq(tds.decode([tensor([False, True, True])]), [['b','c']])
test_eq(type(tds[1][0]), TensorMultiCategory)
test_stdout(lambda: show_at(tds,2), 'a;c')
# export
class EncodedMultiCategorize(Categorize):
"Transform of one-hot encoded multi-category that decodes with `vocab`"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab):
super().__init__(vocab)
self.c = len(vocab)
def encodes(self, o): return TensorMultiCategory(tensor(o).float())
def decodes(self, o): return MultiCategory (one_hot_decode(o, self.vocab))
_tfm = EncodedMultiCategorize(vocab=['a', 'b', 'c'])
test_eq(_tfm([1,0,1]), tensor([1., 0., 1.]))
test_eq(type(_tfm([1,0,1])), TensorMultiCategory)
test_eq(_tfm.decode(tensor([False, True, True])), ['b','c'])
_tfm
#export
class RegressionSetup(DisplayedTransform):
"Transform that floatifies targets"
loss_func=MSELossFlat()
def __init__(self, c=None): store_attr()
def encodes(self, o): return tensor(o).float()
def decodes(self, o): return TitledFloat(o) if o.ndim==0 else TitledTuple(o_.item() for o_ in o)
def setups(self, dsets):
if self.c is not None: return
try: self.c = len(dsets[0]) if hasattr(dsets[0], '__len__') else 1
except: self.c = 0
_tfm = RegressionSetup()
dsets = Datasets([0, 1, 2], RegressionSetup)
test_eq(dsets.c, 1)
test_eq_type(dsets[0], (tensor(0.),))
dsets = Datasets([[0, 1, 2], [3,4,5]], RegressionSetup)
test_eq(dsets.c, 3)
test_eq_type(dsets[0], (tensor([0.,1.,2.]),))
#export
def get_c(dls):
if getattr(dls, 'c', False): return dls.c
if getattr(getattr(dls.train, 'after_item', None), 'c', False): return dls.train.after_item.c
if getattr(getattr(dls.train, 'after_batch', None), 'c', False): return dls.train.after_batch.c
vocab = getattr(dls, 'vocab', [])
if len(vocab) > 0 and is_listy(vocab[-1]): vocab = vocab[-1]
return len(vocab)
```
## End-to-end dataset example with MNIST
Let's show how to use those functions to grab the mnist dataset in a `Datasets`. First we grab all the images.
```
path = untar_data(URLs.MNIST_TINY)
items = get_image_files(path)
```
Then we split between train and validation depending on the folder.
```
splitter = GrandparentSplitter()
splits = splitter(items)
train,valid = (items[i] for i in splits)
train[:3],valid[:3]
```
Our inputs are images that we open and convert to tensors; our targets are labels read from the parent directory and converted to categories.
```
from PIL import Image
def open_img(fn:Path): return Image.open(fn).copy()
def img2tensor(im:Image.Image): return TensorImage(array(im)[None])
tfms = [[open_img, img2tensor],
[parent_label, Categorize()]]
train_ds = Datasets(train, tfms)
x,y = train_ds[3]
xd,yd = decode_at(train_ds,3)
test_eq(parent_label(train[3]),yd)
test_eq(array(Image.open(train[3])),xd[0].numpy())
ax = show_at(train_ds, 3, cmap="Greys", figsize=(1,1))
assert ax.title.get_text() in ('3','7')
test_fig_exists(ax)
```
## ToTensor -
```
#export
class ToTensor(Transform):
"Convert item to appropriate tensor class"
order = 5
```
## IntToFloatTensor -
```
# export
class IntToFloatTensor(DisplayedTransform):
"Transform image to float tensor, optionally dividing by 255 (e.g. for images)."
order = 10 #Need to run after PIL transforms on the GPU
def __init__(self, div=255., div_mask=1): store_attr()
def encodes(self, o:TensorImage): return o.float().div_(self.div)
def encodes(self, o:TensorMask ): return o.long() // self.div_mask
def decodes(self, o:TensorImage): return ((o.clamp(0., 1.) * self.div).long()) if self.div else o
t = (TensorImage(tensor(1)),tensor(2).long(),TensorMask(tensor(3)))
tfm = IntToFloatTensor()
ft = tfm(t)
test_eq(ft, [1./255, 2, 3])
test_eq(type(ft[0]), TensorImage)
test_eq(type(ft[2]), TensorMask)
test_eq(ft[0].type(),'torch.FloatTensor')
test_eq(ft[1].type(),'torch.LongTensor')
test_eq(ft[2].type(),'torch.LongTensor')
```
## Normalization -
```
# export
def broadcast_vec(dim, ndim, *t, cuda=True):
"Make a vector broadcastable over `dim` (out of `ndim` total) by prepending and appending unit axes"
v = [1]*ndim
v[dim] = -1
f = to_device if cuda else noop
return [f(tensor(o).view(*v)) for o in t]
# export
@docs
class Normalize(DisplayedTransform):
"Normalize/denorm batch of `TensorImage`"
parameters,order = L('mean', 'std'),99
def __init__(self, mean=None, std=None, axes=(0,2,3)): store_attr()
@classmethod
def from_stats(cls, mean, std, dim=1, ndim=4, cuda=True): return cls(*broadcast_vec(dim, ndim, mean, std, cuda=cuda))
def setups(self, dl:DataLoader):
if self.mean is None or self.std is None:
x,*_ = dl.one_batch()
self.mean,self.std = x.mean(self.axes, keepdim=True),x.std(self.axes, keepdim=True)+1e-7
def encodes(self, x:TensorImage): return (x-self.mean) / self.std
def decodes(self, x:TensorImage):
f = to_cpu if x.device.type=='cpu' else noop
return (x*f(self.std) + f(self.mean))
_docs=dict(encodes="Normalize batch", decodes="Denormalize batch")
mean,std = [0.5]*3,[0.5]*3
mean,std = broadcast_vec(1, 4, mean, std)
batch_tfms = [IntToFloatTensor(), Normalize.from_stats(mean,std)]
tdl = TfmdDL(train_ds, after_batch=batch_tfms, bs=4, device=default_device())
x,y = tdl.one_batch()
xd,yd = tdl.decode((x,y))
test_eq(x.type(), 'torch.cuda.FloatTensor' if default_device().type=='cuda' else 'torch.FloatTensor')
test_eq(xd.type(), 'torch.LongTensor')
test_eq(type(x), TensorImage)
test_eq(type(y), TensorCategory)
assert x.mean()<0.0
assert x.std()>0.5
assert 0<xd.float().mean()/255.<1
assert 0<xd.float().std()/255.<0.5
#hide
nrm = Normalize()
batch_tfms = [IntToFloatTensor(), nrm]
tdl = TfmdDL(train_ds, after_batch=batch_tfms, bs=4)
x,y = tdl.one_batch()
test_close(x.mean(), 0.0, 1e-4)
assert x.std()>0.9, x.std()
#Just for visuals
from fastai.vision.core import *
tdl.show_batch((x,y))
#hide
#TODO: make the above check a proper test
x,y = torch.add(x,0),torch.add(y,0) #Lose type of tensors (to emulate predictions)
test_ne(type(x), TensorImage)
tdl.show_batch((x,y), figsize=(1,1)) #Check that types are put back by dl.
```
## Export -
```
#hide
from nbdev.export import notebook2script
notebook2script()
```
|
github_jupyter
|
#default_exp data.transforms
#export
from fastai.torch_basics import *
from fastai.data.core import *
from fastai.data.load import *
from fastai.data.external import *
from sklearn.model_selection import train_test_split
#hide
from nbdev.showdoc import *
path = untar_data(URLs.MNIST_TINY)
(path/'train').ls()
# export
def _get_files(p, fs, extensions=None):
p = Path(p)
res = [p/f for f in fs if not f.startswith('.')
and ((not extensions) or f'.{f.split(".")[-1].lower()}' in extensions)]
return res
# export
def get_files(path, extensions=None, recurse=True, folders=None, followlinks=True):
"Get all the files in `path` with optional `extensions`, optionally with `recurse`, only in `folders`, if specified."
path = Path(path)
folders=L(folders)
extensions = setify(extensions)
extensions = {e.lower() for e in extensions}
if recurse:
res = []
for i,(p,d,f) in enumerate(os.walk(path, followlinks=followlinks)): # returns (dirpath, dirnames, filenames)
if len(folders) !=0 and i==0: d[:] = [o for o in d if o in folders]
else: d[:] = [o for o in d if not o.startswith('.')]
if len(folders) !=0 and i==0 and '.' not in folders: continue
res += _get_files(p, f, extensions)
else:
f = [o.name for o in os.scandir(path) if o.is_file()]
res = _get_files(path, f, extensions)
return L(res)
t3 = get_files(path/'train'/'3', extensions='.png', recurse=False)
t7 = get_files(path/'train'/'7', extensions='.png', recurse=False)
t = get_files(path/'train', extensions='.png', recurse=True)
test_eq(len(t), len(t3)+len(t7))
test_eq(len(get_files(path/'train'/'3', extensions='.jpg', recurse=False)),0)
test_eq(len(t), len(get_files(path, extensions='.png', recurse=True, folders='train')))
t
#hide
test_eq(len(get_files(path/'train'/'3', recurse=False)),346)
test_eq(len(get_files(path, extensions='.png', recurse=True, folders=['train', 'test'])),729)
test_eq(len(get_files(path, extensions='.png', recurse=True, folders='train')),709)
test_eq(len(get_files(path, extensions='.png', recurse=True, folders='training')),0)
#export
def FileGetter(suf='', extensions=None, recurse=True, folders=None):
"Create `get_files` partial function that searches path suffix `suf`, only in `folders`, if specified, and passes along args"
def _inner(o, extensions=extensions, recurse=recurse, folders=folders):
return get_files(o/suf, extensions, recurse, folders)
return _inner
fpng = FileGetter(extensions='.png', recurse=False)
test_eq(len(t7), len(fpng(path/'train'/'7')))
test_eq(len(t), len(fpng(path/'train', recurse=True)))
fpng_r = FileGetter(extensions='.png', recurse=True)
test_eq(len(t), len(fpng_r(path/'train')))
#export
image_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))
#export
def get_image_files(path, recurse=True, folders=None):
"Get image files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=image_extensions, recurse=recurse, folders=folders)
test_eq(len(t), len(get_image_files(path, recurse=True, folders='train')))
#export
def ImageGetter(suf='', recurse=True, folders=None):
"Create `get_image_files` partial function that searches path suffix `suf` and passes along `kwargs`, only in `folders`, if specified."
def _inner(o, recurse=recurse, folders=folders): return get_image_files(o/suf, recurse, folders)
return _inner
test_eq(len(get_files(path/'train', extensions='.png', recurse=True, folders='3')),
len(ImageGetter( 'train', recurse=True, folders='3')(path)))
#export
def get_text_files(path, recurse=True, folders=None):
"Get text files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=['.txt'], recurse=recurse, folders=folders)
#export
class ItemGetter(ItemTransform):
"Creates a proper transform that applies `itemgetter(i)` (even on a tuple)"
_retain = False
def __init__(self, i): self.i = i
def encodes(self, x): return x[self.i]
test_eq(ItemGetter(1)((1,2,3)), 2)
test_eq(ItemGetter(1)(L(1,2,3)), 2)
test_eq(ItemGetter(1)([1,2,3]), 2)
test_eq(ItemGetter(1)(np.array([1,2,3])), 2)
#export
class AttrGetter(ItemTransform):
"Creates a proper transform that applies `attrgetter(nm)` (even on a tuple)"
_retain = False
def __init__(self, nm, default=None): store_attr()
def encodes(self, x): return getattr(x, self.nm, self.default)
test_eq(AttrGetter('shape')(torch.randn([4,5])), [4,5])
test_eq(AttrGetter('shape', [0])([4,5]), [0])
# export
def RandomSplitter(valid_pct=0.2, seed=None):
"Create function that splits `items` between train/val with `valid_pct` randomly."
def _inner(o):
if seed is not None: torch.manual_seed(seed)
rand_idx = L(int(i) for i in torch.randperm(len(o)))
cut = int(valid_pct * len(o))
return rand_idx[cut:],rand_idx[:cut]
return _inner
src = list(range(30))
f = RandomSplitter(seed=42)
trn,val = f(src)
assert 0<len(trn)<len(src)
assert all(o not in val for o in trn)
test_eq(len(trn), len(src)-len(val))
# test random seed consistency
test_eq(f(src)[0], trn)
# export
def TrainTestSplitter(test_size=0.2, random_state=None, stratify=None, train_size=None, shuffle=True):
"Split `items` into random train and test subsets using sklearn train_test_split utility."
def _inner(o, **kwargs):
train, valid = train_test_split(range(len(o)), test_size=test_size, random_state=random_state, stratify=stratify, train_size=train_size, shuffle=shuffle)
return L(train), L(valid)
return _inner
src = list(range(30))
labels = [0] * 20 + [1] * 10
test_size = 0.2
f = TrainTestSplitter(test_size=test_size, random_state=42, stratify=labels)
trn,val = f(src)
assert 0<len(trn)<len(src)
assert all(o not in val for o in trn)
test_eq(len(trn), len(src)-len(val))
# test random seed consistency
test_eq(f(src)[0], trn)
# test labels distribution consistency
# there should be test_size % of zeroes and ones respectively in the validation set
test_eq(len([t for t in val if t < 20]) / 20, test_size)
test_eq(len([t for t in val if t >= 20]) / 10, test_size)
#export
def IndexSplitter(valid_idx):
"Split `items` so that `val_idx` are in the validation set and the others in the training set"
def _inner(o):
train_idx = np.setdiff1d(np.array(range_of(o)), np.array(valid_idx))
return L(train_idx, use_list=True), L(valid_idx, use_list=True)
return _inner
items = list(range(10))
splitter = IndexSplitter([3,7,9])
test_eq(splitter(items),[[0,1,2,4,5,6,8],[3,7,9]])
# export
def _grandparent_idxs(items, name):
def _inner(items, name): return mask2idxs(Path(o).parent.parent.name == name for o in items)
return [i for n in L(name) for i in _inner(items,n)]
# export
def GrandparentSplitter(train_name='train', valid_name='valid'):
"Split `items` from the grand parent folder names (`train_name` and `valid_name`)."
def _inner(o):
return _grandparent_idxs(o, train_name),_grandparent_idxs(o, valid_name)
return _inner
fnames = [path/'train/3/9932.png', path/'valid/7/7189.png',
path/'valid/7/7320.png', path/'train/7/9833.png',
path/'train/3/7666.png', path/'valid/3/925.png',
path/'train/7/724.png', path/'valid/3/93055.png']
splitter = GrandparentSplitter()
test_eq(splitter(fnames),[[0,3,4,6],[1,2,5,7]])
fnames2 = fnames + [path/'test/3/4256.png', path/'test/7/2345.png', path/'valid/7/6467.png']
splitter = GrandparentSplitter(train_name=('train', 'valid'), valid_name='test')
test_eq(splitter(fnames2),[[0,3,4,6,1,2,5,7,10],[8,9]])
# export
def FuncSplitter(func):
"Split `items` by result of `func` (`True` for validation, `False` for training set)."
def _inner(o):
val_idx = mask2idxs(func(o_) for o_ in o)
return IndexSplitter(val_idx)(o)
return _inner
splitter = FuncSplitter(lambda o: Path(o).parent.parent.name == 'valid')
test_eq(splitter(fnames),[[0,3,4,6],[1,2,5,7]])
# export
def MaskSplitter(mask):
"Split `items` depending on the value of `mask`."
def _inner(o): return IndexSplitter(mask2idxs(mask))(o)
return _inner
items = list(range(6))
splitter = MaskSplitter([True,False,False,True,False,True])
test_eq(splitter(items),[[1,2,4],[0,3,5]])
# export
def FileSplitter(fname):
"Split `items` by providing file `fname` (contains names of valid items separated by newline)."
valid = Path(fname).read().split('\n')
def _func(x): return x.name in valid
def _inner(o): return FuncSplitter(_func)(o)
return _inner
with tempfile.TemporaryDirectory() as d:
fname = Path(d)/'valid.txt'
fname.write('\n'.join([Path(fnames[i]).name for i in [1,3,4]]))
splitter = FileSplitter(fname)
test_eq(splitter(fnames),[[0,2,5,6,7],[1,3,4]])
# export
def ColSplitter(col='is_valid'):
"Split `items` (supposed to be a dataframe) by value in `col`"
def _inner(o):
assert isinstance(o, pd.DataFrame), "ColSplitter only works when your items are a pandas DataFrame"
valid_idx = (o.iloc[:,col] if isinstance(col, int) else o[col]).values.astype('bool')
return IndexSplitter(mask2idxs(valid_idx))(o)
return _inner
df = pd.DataFrame({'a': [0,1,2,3,4], 'b': [True,False,True,True,False]})
splits = ColSplitter('b')(df)
test_eq(splits, [[1,4], [0,2,3]])
#Works with strings or index
splits = ColSplitter(1)(df)
test_eq(splits, [[1,4], [0,2,3]])
# does not get confused if the type of 'is_valid' is integer, but it is meant to be a yes/no flag
df = pd.DataFrame({'a': [0,1,2,3,4], 'is_valid': [1,0,1,1,0]})
splits_by_int = ColSplitter('is_valid')(df)
test_eq(splits_by_int, [[1,4], [0,2,3]])
# export
def RandomSubsetSplitter(train_sz, valid_sz, seed=None):
"Take randoms subsets of `splits` with `train_sz` and `valid_sz`"
assert 0 < train_sz < 1
assert 0 < valid_sz < 1
assert train_sz + valid_sz <= 1.
def _inner(o):
if seed is not None: torch.manual_seed(seed)
train_len,valid_len = int(len(o)*train_sz),int(len(o)*valid_sz)
idxs = L(int(i) for i in torch.randperm(len(o)))
return idxs[:train_len],idxs[train_len:train_len+valid_len]
return _inner
items = list(range(100))
valid_idx = list(np.arange(70,100))
splits = RandomSubsetSplitter(0.3, 0.1)(items)
test_eq(len(splits[0]), 30)
test_eq(len(splits[1]), 10)
# export
def parent_label(o):
"Label `item` with the parent folder name."
return Path(o).parent.name
test_eq(parent_label(fnames[0]), '3')
test_eq(parent_label("fastai_dev/dev/data/mnist_tiny/train/3/9932.png"), '3')
[parent_label(o) for o in fnames]
#hide
#test for MS Windows when os.path.sep is '\\' instead of '/'
test_eq(parent_label(os.path.join("fastai_dev","dev","data","mnist_tiny","train", "3", "9932.png") ), '3')
# export
class RegexLabeller():
"Label `item` with regex `pat`."
def __init__(self, pat, match=False):
self.pat = re.compile(pat)
self.matcher = self.pat.match if match else self.pat.search
def __call__(self, o):
res = self.matcher(str(o))
assert res,f'Failed to find "{self.pat}" in "{o}"'
return res.group(1)
f = RegexLabeller(fr'{os.path.sep}(\d){os.path.sep}')
test_eq(f(fnames[0]), '3')
[f(o) for o in fnames]
f = RegexLabeller(r'(\d*)', match=True)
test_eq(f(fnames[0].name), '9932')
#export
class ColReader(DisplayedTransform):
"Read `cols` in `row` with potential `pref` and `suff`"
def __init__(self, cols, pref='', suff='', label_delim=None):
store_attr()
self.pref = str(pref) + os.path.sep if isinstance(pref, Path) else pref
self.cols = L(cols)
def _do_one(self, r, c):
o = r[c] if isinstance(c, int) else r[c] if c=='name' else getattr(r, c)
if len(self.pref)==0 and len(self.suff)==0 and self.label_delim is None: return o
if self.label_delim is None: return f'{self.pref}{o}{self.suff}'
else: return o.split(self.label_delim) if len(o)>0 else []
def __call__(self, o, **kwargs):
if len(self.cols) == 1: return self._do_one(o, self.cols[0])
return L(self._do_one(o, c) for c in self.cols)
df = pd.DataFrame({'a': 'a b c d'.split(), 'b': ['1 2', '0', '', '1 2 3']})
f = ColReader('a', pref='0', suff='1')
test_eq([f(o) for o in df.itertuples()], '0a1 0b1 0c1 0d1'.split())
f = ColReader('b', label_delim=' ')
test_eq([f(o) for o in df.itertuples()], [['1', '2'], ['0'], [], ['1', '2', '3']])
df['a1'] = df['a']
f = ColReader(['a', 'a1'], pref='0', suff='1')
test_eq([f(o) for o in df.itertuples()], [L('0a1', '0a1'), L('0b1', '0b1'), L('0c1', '0c1'), L('0d1', '0d1')])
df = pd.DataFrame({'a': [L(0,1), L(2,3,4), L(5,6,7)]})
f = ColReader('a')
test_eq([f(o) for o in df.itertuples()], [L(0,1), L(2,3,4), L(5,6,7)])
df['name'] = df['a']
f = ColReader('name')
test_eq([f(df.iloc[0,:])], [L(0,1)])
#export
class CategoryMap(CollBase):
"Collection of categories with the reverse mapping in `o2i`"
def __init__(self, col, sort=True, add_na=False, strict=False):
if is_categorical_dtype(col):
items = L(col.cat.categories, use_list=True)
#Remove non-used categories while keeping order
if strict: items = L(o for o in items if o in col.unique())
else:
if not hasattr(col,'unique'): col = L(col, use_list=True)
# `o==o` is the generalized definition of non-NaN used by Pandas
items = L(o for o in col.unique() if o==o)
if sort: items = items.sorted()
self.items = '#na#' + items if add_na else items
self.o2i = defaultdict(int, self.items.val2idx()) if add_na else dict(self.items.val2idx())
def map_objs(self,objs):
"Map `objs` to IDs"
return L(self.o2i[o] for o in objs)
def map_ids(self,ids):
"Map `ids` to objects in vocab"
return L(self.items[o] for o in ids)
def __eq__(self,b): return all_equal(b,self)
t = CategoryMap([4,2,3,4])
test_eq(t, [2,3,4])
test_eq(t.o2i, {2:0,3:1,4:2})
test_eq(t.map_objs([2,3]), [0,1])
test_eq(t.map_ids([0,1]), [2,3])
test_fail(lambda: t.o2i['unseen label'])
t = CategoryMap([4,2,3,4], add_na=True)
test_eq(t, ['#na#',2,3,4])
test_eq(t.o2i, {'#na#':0,2:1,3:2,4:3})
t = CategoryMap(pd.Series([4,2,3,4]), sort=False)
test_eq(t, [4,2,3])
test_eq(t.o2i, {4:0,2:1,3:2})
col = pd.Series(pd.Categorical(['M','H','L','M'], categories=['H','M','L'], ordered=True))
t = CategoryMap(col)
test_eq(t, ['H','M','L'])
test_eq(t.o2i, {'H':0,'M':1,'L':2})
col = pd.Series(pd.Categorical(['M','H','M'], categories=['H','M','L'], ordered=True))
t = CategoryMap(col, strict=True)
test_eq(t, ['H','M'])
test_eq(t.o2i, {'H':0,'M':1})
# export
class Categorize(DisplayedTransform):
"Reversible transform of category string to `vocab` id"
loss_func,order=CrossEntropyLossFlat(),1
def __init__(self, vocab=None, sort=True, add_na=False):
if vocab is not None: vocab = CategoryMap(vocab, sort=sort, add_na=add_na)
store_attr()
def setups(self, dsets):
if self.vocab is None and dsets is not None: self.vocab = CategoryMap(dsets, sort=self.sort, add_na=self.add_na)
self.c = len(self.vocab)
def encodes(self, o): return TensorCategory(self.vocab.o2i[o])
def decodes(self, o): return Category (self.vocab [o])
#export
class Category(str, ShowTitle): _show_args = {'label': 'category'}
cat = Categorize()
tds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])
test_eq(cat.vocab, ['cat', 'dog'])
test_eq(cat('cat'), 0)
test_eq(cat.decode(1), 'dog')
test_stdout(lambda: show_at(tds,2), 'cat')
cat = Categorize(add_na=True)
tds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])
test_eq(cat.vocab, ['#na#', 'cat', 'dog'])
test_eq(cat('cat'), 1)
test_eq(cat.decode(2), 'dog')
test_stdout(lambda: show_at(tds,2), 'cat')
cat = Categorize(vocab=['dog', 'cat'], sort=False, add_na=True)
tds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])
test_eq(cat.vocab, ['#na#', 'dog', 'cat'])
test_eq(cat('dog'), 1)
test_eq(cat.decode(2), 'cat')
test_stdout(lambda: show_at(tds,2), 'cat')
# export
class MultiCategorize(Categorize):
"Reversible transform of multi-category strings to `vocab` id"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab=None, add_na=False): super().__init__(vocab=vocab,add_na=add_na)
def setups(self, dsets):
if not dsets: return
if self.vocab is None:
vals = set()
for b in dsets: vals = vals.union(set(b))
self.vocab = CategoryMap(list(vals), add_na=self.add_na)
def encodes(self, o): return TensorMultiCategory([self.vocab.o2i[o_] for o_ in o])
def decodes(self, o): return MultiCategory ([self.vocab [o_] for o_ in o])
#export
class MultiCategory(L):
def show(self, ctx=None, sep=';', color='black', **kwargs):
return show_title(sep.join(self.map(str)), ctx=ctx, color=color, **kwargs)
cat = MultiCategorize()
tds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], tfms=[cat])
test_eq(tds[3][0], TensorMultiCategory([]))
test_eq(cat.vocab, ['a', 'b', 'c'])
test_eq(cat(['a', 'c']), tensor([0,2]))
test_eq(cat([]), tensor([]))
test_eq(cat.decode([1]), ['b'])
test_eq(cat.decode([0,2]), ['a', 'c'])
test_stdout(lambda: show_at(tds,2), 'a;c')
# export
class OneHotEncode(DisplayedTransform):
"One-hot encodes targets"
order=2
def __init__(self, c=None): store_attr()
def setups(self, dsets):
if self.c is None: self.c = len(L(getattr(dsets, 'vocab', None)))
if not self.c: warn("Couldn't infer the number of classes, please pass a value for `c` at init")
def encodes(self, o): return TensorMultiCategory(one_hot(o, self.c).float())
def decodes(self, o): return one_hot_decode(o, None)
_tfm = OneHotEncode(c=3)
test_eq(_tfm([0,2]), tensor([1.,0,1]))
test_eq(_tfm.decode(tensor([0,1,1])), [1,2])
tds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], [[MultiCategorize(), OneHotEncode()]])
test_eq(tds[1], [tensor([1.,0,0])])
test_eq(tds[3], [tensor([0.,0,0])])
test_eq(tds.decode([tensor([False, True, True])]), [['b','c']])
test_eq(type(tds[1][0]), TensorMultiCategory)
test_stdout(lambda: show_at(tds,2), 'a;c')
#hide
#test with passing the vocab
tds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], [[MultiCategorize(vocab=['a', 'b', 'c']), OneHotEncode()]])
test_eq(tds[1], [tensor([1.,0,0])])
test_eq(tds[3], [tensor([0.,0,0])])
test_eq(tds.decode([tensor([False, True, True])]), [['b','c']])
test_eq(type(tds[1][0]), TensorMultiCategory)
test_stdout(lambda: show_at(tds,2), 'a;c')
# export
class EncodedMultiCategorize(Categorize):
"Transform of one-hot encoded multi-category that decodes with `vocab`"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab):
super().__init__(vocab)
self.c = len(vocab)
def encodes(self, o): return TensorMultiCategory(tensor(o).float())
def decodes(self, o): return MultiCategory (one_hot_decode(o, self.vocab))
_tfm = EncodedMultiCategorize(vocab=['a', 'b', 'c'])
test_eq(_tfm([1,0,1]), tensor([1., 0., 1.]))
test_eq(type(_tfm([1,0,1])), TensorMultiCategory)
test_eq(_tfm.decode(tensor([False, True, True])), ['b','c'])
_tfm
#export
class RegressionSetup(DisplayedTransform):
"Transform that floatifies targets"
loss_func=MSELossFlat()
def __init__(self, c=None): store_attr()
def encodes(self, o): return tensor(o).float()
def decodes(self, o): return TitledFloat(o) if o.ndim==0 else TitledTuple(o_.item() for o_ in o)
def setups(self, dsets):
if self.c is not None: return
try: self.c = len(dsets[0]) if hasattr(dsets[0], '__len__') else 1
except: self.c = 0
_tfm = RegressionSetup()
dsets = Datasets([0, 1, 2], RegressionSetup)
test_eq(dsets.c, 1)
test_eq_type(dsets[0], (tensor(0.),))
dsets = Datasets([[0, 1, 2], [3,4,5]], RegressionSetup)
test_eq(dsets.c, 3)
test_eq_type(dsets[0], (tensor([0.,1.,2.]),))
#export
def get_c(dls):
if getattr(dls, 'c', False): return dls.c
if getattr(getattr(dls.train, 'after_item', None), 'c', False): return dls.train.after_item.c
if getattr(getattr(dls.train, 'after_batch', None), 'c', False): return dls.train.after_batch.c
vocab = getattr(dls, 'vocab', [])
if len(vocab) > 0 and is_listy(vocab[-1]): vocab = vocab[-1]
return len(vocab)
path = untar_data(URLs.MNIST_TINY)
items = get_image_files(path)
splitter = GrandparentSplitter()
splits = splitter(items)
train,valid = (items[i] for i in splits)
train[:3],valid[:3]
from PIL import Image
def open_img(fn:Path): return Image.open(fn).copy()
def img2tensor(im:Image.Image): return TensorImage(array(im)[None])
tfms = [[open_img, img2tensor],
[parent_label, Categorize()]]
train_ds = Datasets(train, tfms)
x,y = train_ds[3]
xd,yd = decode_at(train_ds,3)
test_eq(parent_label(train[3]),yd)
test_eq(array(Image.open(train[3])),xd[0].numpy())
ax = show_at(train_ds, 3, cmap="Greys", figsize=(1,1))
assert ax.title.get_text() in ('3','7')
test_fig_exists(ax)
#export
class ToTensor(Transform):
"Convert item to appropriate tensor class"
order = 5
# export
class IntToFloatTensor(DisplayedTransform):
"Transform image to float tensor, optionally dividing by 255 (e.g. for images)."
order = 10 #Need to run after PIL transforms on the GPU
def __init__(self, div=255., div_mask=1): store_attr()
def encodes(self, o:TensorImage): return o.float().div_(self.div)
def encodes(self, o:TensorMask ): return o.long() // self.div_mask
def decodes(self, o:TensorImage): return ((o.clamp(0., 1.) * self.div).long()) if self.div else o
t = (TensorImage(tensor(1)),tensor(2).long(),TensorMask(tensor(3)))
tfm = IntToFloatTensor()
ft = tfm(t)
test_eq(ft, [1./255, 2, 3])
test_eq(type(ft[0]), TensorImage)
test_eq(type(ft[2]), TensorMask)
test_eq(ft[0].type(),'torch.FloatTensor')
test_eq(ft[1].type(),'torch.LongTensor')
test_eq(ft[2].type(),'torch.LongTensor')
# export
def broadcast_vec(dim, ndim, *t, cuda=True):
"Make a vector broadcastable over `dim` (out of `ndim` total) by prepending and appending unit axes"
v = [1]*ndim
v[dim] = -1
f = to_device if cuda else noop
return [f(tensor(o).view(*v)) for o in t]
# export
@docs
class Normalize(DisplayedTransform):
"Normalize/denorm batch of `TensorImage`"
parameters,order = L('mean', 'std'),99
def __init__(self, mean=None, std=None, axes=(0,2,3)): store_attr()
@classmethod
def from_stats(cls, mean, std, dim=1, ndim=4, cuda=True): return cls(*broadcast_vec(dim, ndim, mean, std, cuda=cuda))
def setups(self, dl:DataLoader):
if self.mean is None or self.std is None:
x,*_ = dl.one_batch()
self.mean,self.std = x.mean(self.axes, keepdim=True),x.std(self.axes, keepdim=True)+1e-7
def encodes(self, x:TensorImage): return (x-self.mean) / self.std
def decodes(self, x:TensorImage):
f = to_cpu if x.device.type=='cpu' else noop
return (x*f(self.std) + f(self.mean))
_docs=dict(encodes="Normalize batch", decodes="Denormalize batch")
mean,std = [0.5]*3,[0.5]*3
mean,std = broadcast_vec(1, 4, mean, std)
batch_tfms = [IntToFloatTensor(), Normalize.from_stats(mean,std)]
tdl = TfmdDL(train_ds, after_batch=batch_tfms, bs=4, device=default_device())
x,y = tdl.one_batch()
xd,yd = tdl.decode((x,y))
test_eq(x.type(), 'torch.cuda.FloatTensor' if default_device().type=='cuda' else 'torch.FloatTensor')
test_eq(xd.type(), 'torch.LongTensor')
test_eq(type(x), TensorImage)
test_eq(type(y), TensorCategory)
assert x.mean()<0.0
assert x.std()>0.5
assert 0<xd.float().mean()/255.<1
assert 0<xd.float().std()/255.<0.5
#hide
nrm = Normalize()
batch_tfms = [IntToFloatTensor(), nrm]
tdl = TfmdDL(train_ds, after_batch=batch_tfms, bs=4)
x,y = tdl.one_batch()
test_close(x.mean(), 0.0, 1e-4)
assert x.std()>0.9, x.std()
#Just for visuals
from fastai.vision.core import *
tdl.show_batch((x,y))
#hide
#TODO: make the above check a proper test
x,y = torch.add(x,0),torch.add(y,0) #Lose type of tensors (to emulate predictions)
test_ne(type(x), TensorImage)
tdl.show_batch((x,y), figsize=(1,1)) #Check that types are put back by dl.
#hide
from nbdev.export import notebook2script
notebook2script()
# Module 4 Day 02
# Instructor Turn - 01 - Cleaning Data - 👩🏫🧑🏫
```
# Dependencies
import pandas as pd
import numpy as np
# Name of the CSV file
file = 'Resources/donors2008.csv'
# The correct encoding must be used to read the CSV in pandas
df = pd.read_csv(file, encoding="ISO-8859-1")
# Preview of the DataFrame
# Note that FIELD8 is likely a meaningless column
df.head()
# Delete extraneous column
del df['FIELD8']
df.head()
# Identify incomplete rows
df.count()
# Drop all rows with missing information
df = df.dropna(how='any')
# Verify dropped rows
df.count()
# The Amount column is the wrong data type. It should be numeric.
df.dtypes
# Use pd.to_numeric() method to convert the datatype of the Amount column
df['Amount'] = pd.to_numeric(df['Amount'])
# Verify that the Amount column datatype has been made numeric
df['Amount'].dtype
# Display an overview of the Employers column
df['Employer'].value_counts()
# Clean up Employer category. Replace 'Self Employed' and 'Self' with 'Self-Employed'
df['Employer'] = df['Employer'].replace(
{'Self Employed': 'Self-Employed', 'Self': 'Self-Employed'})
# Verify clean-up.
df['Employer'].value_counts()
df['Employer'] = df['Employer'].replace({'Not Employed': 'Unemployed'})
df['Employer'].value_counts()
# Display a statistical overview
# We can infer the maximum allowable individual contribution from 'max'
df.describe()
```
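One caveat about the `pd.to_numeric()` step above: by default it raises a `ValueError` if a value cannot be parsed. A minimal sketch of the more forgiving variant follows; the `messy` frame and its values are invented purely for illustration and are not part of the donors data.

```python
import pandas as pd

# Hypothetical column with a thousands separator and an unparseable entry
messy = pd.DataFrame({"Amount": ["250", "1,000", "n/a", "75"]})

# Strip the separators first, then coerce anything unparseable to NaN
messy["Amount"] = pd.to_numeric(
    messy["Amount"].str.replace(",", "", regex=False), errors="coerce")

print(messy.dtypes)   # Amount is now float64
print(messy)          # "n/a" became NaN and could be dropped with dropna()
```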
# Students Turn - 02 - Training Grounds - 👩🎓👨🎓
## Portland Crime
### Instructions
* Read in the csv using Pandas and print out the DataFrame that is returned.
* Get a count of rows within the DataFrame in order to determine if there are any null values.
* Drop the rows which contain null values.
* Search through the "Offense Type" column and "replace" any similar values with one consistent value.
```
# Import Dependencies
import pandas as pd
# Reference the file where the CSV is located
crime_csv_path = "Resources/crime_incident_data2017.csv"
# Import the data into a Pandas DataFrame
crime_df = pd.read_csv(crime_csv_path)
crime_df.head()
# look for missing values
crime_df.count()
# drop null rows
new_crime_df = crime_df.dropna(how='any')
# verify counts
new_crime_df.count()
# Check to see if there are any misspelled or similar values in "Offense Type"
new_crime_df["Offense Type"].value_counts()
# Combining similar offenses together
new_crime_df['Offense Type'] = new_crime_df['Offense Type'].replace({'Assisting or Promoting Prostitution': 'Prostitution',
'Commercial Sex Acts': 'Prostitution' })
# Check to see if you combined similar offenses correctly in "Offense Type".
new_crime_df['Offense Type'].value_counts()
# Get the number of crimes against property, society, and person.
new_crime_df['Crime Against'].value_counts()
```
<details>
<summary><strong>Activity 02 Solution ✅</strong></summary>
```python
# Import Dependencies
import pandas as pd
# Reference the file where the CSV is located
crime_csv_path = "Resources/crime_incident_data2017.csv"
# Import the data into a Pandas DataFrame
crime_df = pd.read_csv(crime_csv_path)
crime_df
# look for missing values
crime_df.count()
# drop null rows
no_null_crime_df = crime_df.dropna(how='any')
# verify counts
no_null_crime_df.count()
# Check to see if there are any misspelled or similar values in "Offense Type"
no_null_crime_df["Offense Type"].value_counts()
# Combining similar offenses together
no_null_crime_df = no_null_crime_df.replace(
{"Commercial Sex Acts": "Prostitution", "Assisting or Promoting Prostitution": "Prostitution"})
no_null_crime_df
# Check to see if you combined similar offenses correctly in "Offense Type".
no_null_crime_df["Offense Type"].value_counts()
# Get the number of crimes against property, society, and person.
no_null_crime_df["Crime Against"].value_counts()
```
</details>
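An optional refinement, sketched under the assumption that the same CSV and column names are available: instead of dropping every row with any missing value, the drop can be limited to the columns the analysis actually uses, after first checking how many nulls each column contains.

```python
import pandas as pd

crime_df = pd.read_csv("Resources/crime_incident_data2017.csv")

# How many missing values does each column have?
print(crime_df.isnull().sum())

# Keep rows as long as the columns we analyze are present
subset_df = crime_df.dropna(subset=["Offense Type", "Crime Against"])
print(subset_df["Crime Against"].value_counts())
```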
# Instructor Turn - 03 - Loc And Iloc - 👩🏫🧑🏫
```
import pandas as pd
file = "Resources/sampleData.csv"
original_df = pd.read_csv(file)
original_df.head()
# Set new index to last_name
df = original_df.set_index("last_name")
df.head()
# Grab the data contained within the "Berry" row and the "Phone Number" column
berry_phone = df.loc["Berry", "Phone Number"]
print("Using Loc: " + berry_phone)
also_berry_phone = df.iloc[1, 2]
print("Using Iloc: " + also_berry_phone)
# Grab the first five rows of data and the columns from "id" to "Phone Number"
# The problem with using "last_name" as the index is that the values are not unique so duplicates are returned
# If there are duplicates and loc[] is being used, Pandas will return an error
richardson_to_morales = df.loc[["Richardson", "Berry", "Hudson",
"Mcdonald", "Morales"], ["id", "first_name", "Phone Number"]]
richardson_to_morales
# Using iloc[] will not find duplicates since a numeric index is always unique
also_richardson_to_morales = df.iloc[0:4, 0:3]
also_richardson_to_morales
# The following will select all rows for columns `first_name` and `Phone Number`
df.loc[:, ["first_name", "Phone Number"]].head()
# the following logic test/conditional statement returns a series of boolean values
named_billy = df["first_name"] == "Billy"
named_billy.head()
# Loc and Iloc also allow for conditional statments to filter rows of data
# using Loc on the logic test above only returns rows where the result is True
only_billys = df.loc[df["first_name"] == "Billy", :]
only_billys
# Multiple conditions can be set to narrow down or widen the filter
only_billy_and_peter = df.loc[(df["first_name"] == "Billy") | (
df["first_name"] == "Peter"), :]
only_billy_and_peter
```
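One detail worth spelling out from the example above, shown with a tiny made-up frame rather than the sample CSV: `iloc` position slices exclude the end position, while `loc` label slices include both endpoints, so `iloc[0:4]` returns four rows even when the intent is "the first five".

```python
import pandas as pd

toy = pd.DataFrame({"x": range(5)}, index=["a", "b", "c", "d", "e"])

print(toy.iloc[0:4])      # positions 0 through 3 -> four rows
print(toy.loc["a":"d"])   # labels "a" through "d" inclusive -> four rows
print(toy.loc["a":"e"])   # the end label is included -> five rows
```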
# Students Turn - 04 - Good Movies - 👩🎓👨🎓
## Good Movies
### Instructions
* Use Pandas to load and display the CSV provided in `Resources`.
* List all the columns in the data set.
* We're only interested in IMDb data, so create a new table that takes the Film and all the columns relating to IMDB.
* Filter out only the good movies (any film with an IMDb score above 7) and remove the norm ratings.
* Find less popular movies that you may not have heard about - i.e., anything with under 20K votes.
Data Source: [Movie Rating Dataset](https://github.com/fivethirtyeight/data/blob/master/fandango/fandango_score_comparison.csv)
---
```
# Dependencies
import pandas as pd
# Load in file
movie_file = "Resources/movie_scores.csv"
# Read and display the CSV with Pandas
movie_file_df = pd.read_csv(movie_file)
movie_file_df.head()
# List all the columns in the table
movie_file_df.columns
# We only want IMDb data, so create a new table that takes the Film
# and all the columns relating to IMDB
imdb_df = movie_file_df[["FILM", "IMDB", "IMDB_norm",
"IMDB_norm_round", "IMDB_user_vote_count"]]
imdb_df.head()
# We only like good movies, so find those that scored over 7, and ignore the norm rating
good_movies_df = movie_file_df.loc[movie_file_df["IMDB"] > 7, [
"FILM", "IMDB", "IMDB_user_vote_count"]]
good_movies_df.head()
# Find less popular movies--i.e., those with fewer than 20K votes
unknown_movies_df = good_movies_df.loc[good_movies_df["IMDB_user_vote_count"] < 20000, [
"FILM", "IMDB", "IMDB_user_vote_count"]]
unknown_movies_df.head()
```
<details>
<summary><strong>Activity 04 Solution ✅</strong></summary>
```python
# Dependencies
import pandas as pd
# Load in file
movie_file = "Resources/movie_scores.csv"
# Read and display the CSV with Pandas
movie_file_df = pd.read_csv(movie_file)
movie_file_df.head()
# List all the columns in the table
movie_file_df.columns
# We only want IMDb data, so create a new table that takes the Film and all the columns relating to IMDB
imdb_df = movie_file_df[["FILM", "IMDB", "IMDB_norm",
"IMDB_norm_round", "IMDB_user_vote_count"]]
imdb_df.head()
# We only like good movies, so find those that scored over 7, and ignore the norm rating
good_movies_df = movie_file_df.loc[movie_file_df["IMDB"] > 7, [
"FILM", "IMDB", "IMDB_user_vote_count"]]
good_movies_df.head()
# Find less popular movies--i.e., those with fewer than 20K votes
unknown_movies_df = good_movies_df.loc[good_movies_df["IMDB_user_vote_count"] < 20000, [
"FILM", "IMDB", "IMDB_user_vote_count"]]
unknown_movies_df.head()
```
</details>
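As a follow-up, both filters can be combined in a single `.loc` call with `&`; this is only a compact variant of the two-step solution above and assumes the same file and column names.

```python
import pandas as pd

movie_file_df = pd.read_csv("Resources/movie_scores.csv")

hidden_gems_df = movie_file_df.loc[
    (movie_file_df["IMDB"] > 7) & (movie_file_df["IMDB_user_vote_count"] < 20000),
    ["FILM", "IMDB", "IMDB_user_vote_count"]]
hidden_gems_df.head()
```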
# Instructor Turn - 05 - GroupBy - 👩🏫🧑🏫
```
# Import Dependencies
import pandas as pd
# Create a reference the CSV file desired
csv_path = "Resources/ufoSightings.csv"
# Read the CSV into a Pandas DataFrame
ufo_df = pd.read_csv(csv_path, low_memory = False)
# Print the first five rows of data to the screen
ufo_df.head()
# Remove the rows with missing data
clean_ufo_df = ufo_df.dropna(how="any")
clean_ufo_df.count()
clean_ufo_df.head()
clean_ufo_df.dtypes
# Converting the "duration (seconds)" column's values to numeric
converted_ufo = clean_ufo_df.copy()
converted_ufo["duration (seconds)"] = converted_ufo.loc[:, "duration (seconds)"].astype(float)
converted_ufo.head()
converted_ufo.dtypes
# Filter the data so that only those sightings in the US are in a DataFrame
usa_ufo_df = converted_ufo.loc[converted_ufo["country"] == "us", :]
usa_ufo_df.head()
# Count how many sightings have occurred within each state
state_counts = usa_ufo_df["state"].value_counts()
state_counts.head()
# Using GroupBy in order to separate the data into fields according to "state" values
grouped_usa_df = usa_ufo_df.groupby(['state'])
# The object returned is a "GroupBy" object and cannot be viewed normally...
print(grouped_usa_df)
# In order to be visualized, a data function must be used...
grouped_usa_df.count().head(10)
grouped_usa_df["duration (seconds)"].sum()
# Since "duration (seconds)" was converted to a numeric time, it can now be summed up per state
state_duration = grouped_usa_df["duration (seconds)"].sum()
state_duration.head()
# Creating a new DataFrame using both duration and count
state_summary_table = pd.DataFrame({"Number of Sightings": state_counts,
"Total Visit Time": state_duration})
state_summary_table.head()
# It is also possible to group a DataFrame by multiple columns
# This returns an object with multiple indexes, however, which can be harder to deal with
grouped_international_data = converted_ufo.groupby(['country', 'state'])
grouped_international_data.count().head(20)
# Converting a GroupBy object into a DataFrame
international_duration = pd.DataFrame(
grouped_international_data["duration (seconds)"].sum())
international_duration.head(10)
```
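A hedged alternative to assembling the summary table by hand: `groupby().agg()` can compute several statistics in one pass. The sketch below reuses the UFO file and column names from the walkthrough above.

```python
import pandas as pd

ufo_df = pd.read_csv("Resources/ufoSightings.csv", low_memory=False).dropna(how="any")
ufo_df["duration (seconds)"] = ufo_df["duration (seconds)"].astype(float)

usa_ufo_df = ufo_df.loc[ufo_df["country"] == "us", :]

# count, total, and average duration per state in a single call
state_summary = usa_ufo_df.groupby("state")["duration (seconds)"].agg(["count", "sum", "mean"])
state_summary.head()
```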
# Student Turn - 06 - Training Grounds - 👩🎓👨🎓
## Instructions
* Using the DataFrame provided, do the following:
* Convert the "Membership (Days)" column into weeks and then add this new series into the DataFrame
  * Create a DataFrame that has only the "Trainer", "Weight", and membership in days and weeks.
  * Using groupby, get the average weight and membership length of the gym members for each trainer.
```
# Import Dependencies
import pandas as pd
# A seriously gigantic DataFrame of individuals' names, their trainers, their weight, and their days as gym members
training_data = pd.DataFrame({
"Name":["Gino Walker","Hiedi Wasser","Kerrie Wetzel","Elizabeth Sackett","Jack Mitten","Madalene Wayman","Jamee Horvath","Arlena Reddin","Tula Levan","Teisha Dreier","Leslie Carrier","Arlette Hartson","Romana Merkle","Heath Viviani","Andres Zimmer","Allyson Osman","Yadira Caggiano","Jeanmarie Friedrichs","Leann Ussery","Bee Mom","Pandora Charland","Karena Wooten","Elizabet Albanese","Augusta Borjas","Erma Yadon","Belia Lenser","Karmen Sancho","Edison Mannion","Sonja Hornsby","Morgan Frei","Florencio Murphy","Christoper Hertel","Thalia Stepney","Tarah Argento","Nicol Canfield","Pok Moretti","Barbera Stallings","Muoi Kelso","Cicely Ritz","Sid Demelo","Eura Langan","Vanita An","Frieda Fuhr","Ernest Fitzhenry","Ashlyn Tash","Melodi Mclendon","Rochell Leblanc","Jacqui Reasons","Freeda Mccroy","Vanna Runk","Florinda Milot","Cierra Lecompte","Nancey Kysar","Latasha Dalton","Charlyn Rinaldi","Erline Averett","Mariko Hillary","Rosalyn Trigg","Sherwood Brauer","Hortencia Olesen","Delana Kohut","Geoffrey Mcdade","Iona Delancey","Donnie Read","Cesar Bhatia","Evia Slate","Kaye Hugo","Denise Vento","Lang Kittle","Sherry Whittenberg","Jodi Bracero","Tamera Linneman","Katheryn Koelling","Tonia Shorty","Misha Baxley","Lisbeth Goering","Merle Ladwig","Tammie Omar","Jesusa Avilla","Alda Zabala","Junita Dogan","Jessia Anglin","Peggie Scranton","Dania Clodfelter","Janis Mccarthy","Edmund Galusha","Tonisha Posey","Arvilla Medley","Briana Barbour","Delfina Kiger","Nia Lenig","Ricarda Bulow","Odell Carson","Nydia Clonts","Andree Resendez","Daniela Puma","Sherill Paavola","Gilbert Bloomquist","Shanon Mach","Justin Bangert","Arden Hokanson","Evelyne Bridge","Hee Simek","Ward Deangelis","Jodie Childs","Janis Boehme","Beaulah Glowacki","Denver Stoneham","Tarra Vinton","Deborah Hummell","Ulysses Neil","Kathryn Marques","Rosanna Dake","Gavin Wheat","Tameka Stoke","Janella Clear","Kaye Ciriaco","Suk Bloxham","Gracia Whaley","Philomena Hemingway","Claudette Vaillancourt","Olevia Piche","Trey Chiles","Idalia Scardina","Jenine Tremble","Herbert Krider","Alycia Schrock","Miss Weibel","Pearlene Neidert","Kina Callender","Charlotte Skelley","Theodora Harrigan","Sydney Shreffler","Annamae Trinidad","Tobi Mumme","Rosia Elliot","Debbra Putt","Rena Delosantos","Genna Grennan","Nieves Huf","Berry Lugo","Ayana Verdugo","Joaquin Mazzei","Doris Harmon","Patience Poss","Magaret Zabel","Marylynn Hinojos","Earlene Marcantel","Yuki Evensen","Rema Gay","Delana Haak","Patricia Fetters","Vinnie Elrod","Octavia Bellew","Burma Revard","Lakenya Kato","Vinita Buchner","Sierra Margulies","Shae Funderburg","Jenae Groleau","Louetta Howie","Astrid Duffer","Caron Altizer","Kymberly Amavisca","Mohammad Diedrich","Thora Wrinkle","Bethel Wiemann","Patria Millet","Eldridge Burbach","Alyson Eddie","Zula Hanna","Devin Goodwin","Felipa Kirkwood","Kurtis Kempf","Kasey Lenart","Deena Blankenship","Kandra Wargo","Sherrie Cieslak","Ron Atha","Reggie Barreiro","Daria Saulter","Tandra Eastman","Donnell Lucious","Talisha Rosner","Emiko Bergh","Terresa Launius","Margy Hoobler","Marylou Stelling","Lavonne Justice","Kala Langstaff","China Truett","Louanne Dussault","Thomasena Samaniego","Charlesetta Tarbell","Fatimah Lade","Malisa Cantero","Florencia Litten","Francina Fraise","Patsy London","Deloris Mclaughlin"],
"Trainer":['Bettyann Savory','Mariah Barberio','Gordon Perrine','Pa Dargan','Blanch Victoria','Aldo Byler','Aldo Byler','Williams Camire','Junie Ritenour','Gordon Perrine','Bettyann Savory','Mariah Barberio','Aldo Byler','Barton Stecklein','Bettyann Savory','Barton Stecklein','Gordon Perrine','Pa Dargan','Aldo Byler','Brittani Brin','Bettyann Savory','Phyliss Houk','Bettyann Savory','Junie Ritenour','Aldo Byler','Calvin North','Brittani Brin','Junie Ritenour','Blanch Victoria','Brittani Brin','Bettyann Savory','Blanch Victoria','Mariah Barberio','Bettyann Savory','Blanch Victoria','Brittani Brin','Junie Ritenour','Pa Dargan','Gordon Perrine','Phyliss Houk','Pa Dargan','Mariah Barberio','Phyliss Houk','Phyliss Houk','Calvin North','Williams Camire','Brittani Brin','Gordon Perrine','Bettyann Savory','Bettyann Savory','Pa Dargan','Phyliss Houk','Barton Stecklein','Blanch Victoria','Coleman Dunmire','Phyliss Houk','Blanch Victoria','Pa Dargan','Harland Coolidge','Calvin North','Bettyann Savory','Phyliss Houk','Bettyann Savory','Harland Coolidge','Gordon Perrine','Junie Ritenour','Harland Coolidge','Blanch Victoria','Mariah Barberio','Coleman Dunmire','Aldo Byler','Bettyann Savory','Gordon Perrine','Bettyann Savory','Barton Stecklein','Harland Coolidge','Aldo Byler','Aldo Byler','Pa Dargan','Junie Ritenour','Brittani Brin','Junie Ritenour','Gordon Perrine','Mariah Barberio','Mariah Barberio','Mariah Barberio','Bettyann Savory','Brittani Brin','Aldo Byler','Phyliss Houk','Blanch Victoria','Pa Dargan','Phyliss Houk','Brittani Brin','Barton Stecklein','Coleman Dunmire','Bettyann Savory','Bettyann Savory','Gordon Perrine','Blanch Victoria','Junie Ritenour','Phyliss Houk','Coleman Dunmire','Williams Camire','Harland Coolidge','Williams Camire','Aldo Byler','Harland Coolidge','Gordon Perrine','Brittani Brin','Coleman Dunmire','Calvin North','Phyliss Houk','Brittani Brin','Aldo Byler','Bettyann Savory','Brittani Brin','Gordon Perrine','Calvin North','Harland Coolidge','Coleman Dunmire','Harland Coolidge','Aldo Byler','Junie Ritenour','Blanch Victoria','Harland Coolidge','Blanch Victoria','Junie Ritenour','Harland Coolidge','Junie Ritenour','Gordon Perrine','Brittani Brin','Coleman Dunmire','Williams Camire','Junie Ritenour','Brittani Brin','Calvin North','Barton Stecklein','Barton Stecklein','Mariah Barberio','Coleman Dunmire','Bettyann Savory','Mariah Barberio','Pa Dargan','Barton Stecklein','Coleman Dunmire','Brittani Brin','Barton Stecklein','Pa Dargan','Barton Stecklein','Junie Ritenour','Bettyann Savory','Williams Camire','Pa Dargan','Calvin North','Williams Camire','Coleman Dunmire','Aldo Byler','Barton Stecklein','Coleman Dunmire','Blanch Victoria','Mariah Barberio','Mariah Barberio','Harland Coolidge','Barton Stecklein','Phyliss Houk','Pa Dargan','Bettyann Savory','Barton Stecklein','Harland Coolidge','Junie Ritenour','Pa Dargan','Mariah Barberio','Blanch Victoria','Williams Camire','Phyliss Houk','Phyliss Houk','Coleman Dunmire','Mariah Barberio','Gordon Perrine','Coleman Dunmire','Brittani Brin','Pa Dargan','Coleman Dunmire','Brittani Brin','Blanch Victoria','Coleman Dunmire','Gordon Perrine','Coleman Dunmire','Aldo Byler','Aldo Byler','Mariah Barberio','Williams Camire','Phyliss Houk','Aldo Byler','Williams Camire','Aldo Byler','Williams Camire','Coleman Dunmire','Phyliss Houk'],
"Weight":[128,180,193,177,237,166,224,208,177,241,114,161,162,151,220,142,193,193,124,130,132,141,190,239,213,131,172,127,184,157,215,122,181,240,218,205,239,217,234,158,180,131,194,171,177,110,117,114,217,123,248,189,198,127,182,121,224,111,151,170,188,150,137,231,222,186,139,175,178,246,150,154,129,216,144,198,228,183,173,129,157,199,186,232,172,157,246,239,214,161,132,208,187,224,164,177,175,224,219,235,112,241,243,179,208,196,131,207,182,233,191,162,173,197,190,182,231,196,196,143,250,174,138,135,164,204,235,192,114,179,215,127,185,213,250,213,153,217,176,190,119,167,118,208,113,206,200,236,159,218,168,159,156,183,121,203,215,209,179,219,174,220,129,188,217,250,166,157,112,236,182,144,189,243,238,147,165,115,160,134,245,174,238,157,150,184,174,134,134,248,199,165,117,119,162,112,170,224,247,217],
"Membership (Days)":[52,70,148,124,186,157,127,155,37,185,158,129,93,69,124,13,76,153,164,161,48,121,167,69,39,163,7,34,176,169,108,162,195,86,155,77,197,200,80,142,179,67,58,145,188,147,125,15,13,173,125,4,61,29,132,110,62,137,197,135,162,174,32,151,149,65,18,42,63,62,104,200,189,40,38,199,1,12,8,2,195,30,7,72,130,144,2,34,200,143,43,196,22,115,171,54,143,59,14,52,109,115,187,185,26,19,178,18,120,169,45,52,130,69,168,178,96,22,78,152,39,51,118,130,60,156,108,69,103,158,165,142,86,91,117,77,57,169,86,188,97,111,22,83,81,177,163,35,12,164,21,181,171,138,22,107,58,51,38,128,19,193,157,13,104,89,13,10,26,190,179,101,7,159,100,49,120,109,56,199,51,108,47,171,69,162,74,119,148,88,32,159,65,146,140,171,88,18,59,13]
})
training_data.head()
# Convert the membership days into weeks and then add this data as a new column to the DataFrame.
weeks = training_data["Membership (Days)"]/7
training_data["Membership (Weeks)"] = weeks
training_data.head()
# Create a Dataframe that has the Trainer, Weight, and Membership.
new_training_df = training_data[["Trainer", "Weight", "Membership (Days)"]]
new_training_df.head()
# Using groupby, get the average weight and membership length for each trainer.
trainer_df = new_training_df.groupby(["Trainer"]).mean()
trainer_df
# For comparison, the totals for each trainer.
sum_df = pd.DataFrame(
new_training_df.groupby(["Trainer"]).sum())
sum_df
```
<details>
<summary><strong>Activity 06 Solution ✅</strong></summary>
```python
# Import Dependencies
import pandas as pd
# A seriously gigantic DataFrame of individuals' names, their trainers, their weight, and their days as gym members
training_data = pd.DataFrame({
"Name":["Gino Walker","Hiedi Wasser","Kerrie Wetzel","Elizabeth Sackett","Jack Mitten","Madalene Wayman","Jamee Horvath","Arlena Reddin","Tula Levan","Teisha Dreier","Leslie Carrier","Arlette Hartson","Romana Merkle","Heath Viviani","Andres Zimmer","Allyson Osman","Yadira Caggiano","Jeanmarie Friedrichs","Leann Ussery","Bee Mom","Pandora Charland","Karena Wooten","Elizabet Albanese","Augusta Borjas","Erma Yadon","Belia Lenser","Karmen Sancho","Edison Mannion","Sonja Hornsby","Morgan Frei","Florencio Murphy","Christoper Hertel","Thalia Stepney","Tarah Argento","Nicol Canfield","Pok Moretti","Barbera Stallings","Muoi Kelso","Cicely Ritz","Sid Demelo","Eura Langan","Vanita An","Frieda Fuhr","Ernest Fitzhenry","Ashlyn Tash","Melodi Mclendon","Rochell Leblanc","Jacqui Reasons","Freeda Mccroy","Vanna Runk","Florinda Milot","Cierra Lecompte","Nancey Kysar","Latasha Dalton","Charlyn Rinaldi","Erline Averett","Mariko Hillary","Rosalyn Trigg","Sherwood Brauer","Hortencia Olesen","Delana Kohut","Geoffrey Mcdade","Iona Delancey","Donnie Read","Cesar Bhatia","Evia Slate","Kaye Hugo","Denise Vento","Lang Kittle","Sherry Whittenberg","Jodi Bracero","Tamera Linneman","Katheryn Koelling","Tonia Shorty","Misha Baxley","Lisbeth Goering","Merle Ladwig","Tammie Omar","Jesusa Avilla","Alda Zabala","Junita Dogan","Jessia Anglin","Peggie Scranton","Dania Clodfelter","Janis Mccarthy","Edmund Galusha","Tonisha Posey","Arvilla Medley","Briana Barbour","Delfina Kiger","Nia Lenig","Ricarda Bulow","Odell Carson","Nydia Clonts","Andree Resendez","Daniela Puma","Sherill Paavola","Gilbert Bloomquist","Shanon Mach","Justin Bangert","Arden Hokanson","Evelyne Bridge","Hee Simek","Ward Deangelis","Jodie Childs","Janis Boehme","Beaulah Glowacki","Denver Stoneham","Tarra Vinton","Deborah Hummell","Ulysses Neil","Kathryn Marques","Rosanna Dake","Gavin Wheat","Tameka Stoke","Janella Clear","Kaye Ciriaco","Suk Bloxham","Gracia Whaley","Philomena Hemingway","Claudette Vaillancourt","Olevia Piche","Trey Chiles","Idalia Scardina","Jenine Tremble","Herbert Krider","Alycia Schrock","Miss Weibel","Pearlene Neidert","Kina Callender","Charlotte Skelley","Theodora Harrigan","Sydney Shreffler","Annamae Trinidad","Tobi Mumme","Rosia Elliot","Debbra Putt","Rena Delosantos","Genna Grennan","Nieves Huf","Berry Lugo","Ayana Verdugo","Joaquin Mazzei","Doris Harmon","Patience Poss","Magaret Zabel","Marylynn Hinojos","Earlene Marcantel","Yuki Evensen","Rema Gay","Delana Haak","Patricia Fetters","Vinnie Elrod","Octavia Bellew","Burma Revard","Lakenya Kato","Vinita Buchner","Sierra Margulies","Shae Funderburg","Jenae Groleau","Louetta Howie","Astrid Duffer","Caron Altizer","Kymberly Amavisca","Mohammad Diedrich","Thora Wrinkle","Bethel Wiemann","Patria Millet","Eldridge Burbach","Alyson Eddie","Zula Hanna","Devin Goodwin","Felipa Kirkwood","Kurtis Kempf","Kasey Lenart","Deena Blankenship","Kandra Wargo","Sherrie Cieslak","Ron Atha","Reggie Barreiro","Daria Saulter","Tandra Eastman","Donnell Lucious","Talisha Rosner","Emiko Bergh","Terresa Launius","Margy Hoobler","Marylou Stelling","Lavonne Justice","Kala Langstaff","China Truett","Louanne Dussault","Thomasena Samaniego","Charlesetta Tarbell","Fatimah Lade","Malisa Cantero","Florencia Litten","Francina Fraise","Patsy London","Deloris Mclaughlin"],
"Trainer":['Bettyann Savory','Mariah Barberio','Gordon Perrine','Pa Dargan','Blanch Victoria','Aldo Byler','Aldo Byler','Williams Camire','Junie Ritenour','Gordon Perrine','Bettyann Savory','Mariah Barberio','Aldo Byler','Barton Stecklein','Bettyann Savory','Barton Stecklein','Gordon Perrine','Pa Dargan','Aldo Byler','Brittani Brin','Bettyann Savory','Phyliss Houk','Bettyann Savory','Junie Ritenour','Aldo Byler','Calvin North','Brittani Brin','Junie Ritenour','Blanch Victoria','Brittani Brin','Bettyann Savory','Blanch Victoria','Mariah Barberio','Bettyann Savory','Blanch Victoria','Brittani Brin','Junie Ritenour','Pa Dargan','Gordon Perrine','Phyliss Houk','Pa Dargan','Mariah Barberio','Phyliss Houk','Phyliss Houk','Calvin North','Williams Camire','Brittani Brin','Gordon Perrine','Bettyann Savory','Bettyann Savory','Pa Dargan','Phyliss Houk','Barton Stecklein','Blanch Victoria','Coleman Dunmire','Phyliss Houk','Blanch Victoria','Pa Dargan','Harland Coolidge','Calvin North','Bettyann Savory','Phyliss Houk','Bettyann Savory','Harland Coolidge','Gordon Perrine','Junie Ritenour','Harland Coolidge','Blanch Victoria','Mariah Barberio','Coleman Dunmire','Aldo Byler','Bettyann Savory','Gordon Perrine','Bettyann Savory','Barton Stecklein','Harland Coolidge','Aldo Byler','Aldo Byler','Pa Dargan','Junie Ritenour','Brittani Brin','Junie Ritenour','Gordon Perrine','Mariah Barberio','Mariah Barberio','Mariah Barberio','Bettyann Savory','Brittani Brin','Aldo Byler','Phyliss Houk','Blanch Victoria','Pa Dargan','Phyliss Houk','Brittani Brin','Barton Stecklein','Coleman Dunmire','Bettyann Savory','Bettyann Savory','Gordon Perrine','Blanch Victoria','Junie Ritenour','Phyliss Houk','Coleman Dunmire','Williams Camire','Harland Coolidge','Williams Camire','Aldo Byler','Harland Coolidge','Gordon Perrine','Brittani Brin','Coleman Dunmire','Calvin North','Phyliss Houk','Brittani Brin','Aldo Byler','Bettyann Savory','Brittani Brin','Gordon Perrine','Calvin North','Harland Coolidge','Coleman Dunmire','Harland Coolidge','Aldo Byler','Junie Ritenour','Blanch Victoria','Harland Coolidge','Blanch Victoria','Junie Ritenour','Harland Coolidge','Junie Ritenour','Gordon Perrine','Brittani Brin','Coleman Dunmire','Williams Camire','Junie Ritenour','Brittani Brin','Calvin North','Barton Stecklein','Barton Stecklein','Mariah Barberio','Coleman Dunmire','Bettyann Savory','Mariah Barberio','Pa Dargan','Barton Stecklein','Coleman Dunmire','Brittani Brin','Barton Stecklein','Pa Dargan','Barton Stecklein','Junie Ritenour','Bettyann Savory','Williams Camire','Pa Dargan','Calvin North','Williams Camire','Coleman Dunmire','Aldo Byler','Barton Stecklein','Coleman Dunmire','Blanch Victoria','Mariah Barberio','Mariah Barberio','Harland Coolidge','Barton Stecklein','Phyliss Houk','Pa Dargan','Bettyann Savory','Barton Stecklein','Harland Coolidge','Junie Ritenour','Pa Dargan','Mariah Barberio','Blanch Victoria','Williams Camire','Phyliss Houk','Phyliss Houk','Coleman Dunmire','Mariah Barberio','Gordon Perrine','Coleman Dunmire','Brittani Brin','Pa Dargan','Coleman Dunmire','Brittani Brin','Blanch Victoria','Coleman Dunmire','Gordon Perrine','Coleman Dunmire','Aldo Byler','Aldo Byler','Mariah Barberio','Williams Camire','Phyliss Houk','Aldo Byler','Williams Camire','Aldo Byler','Williams Camire','Coleman Dunmire','Phyliss Houk'],
"Weight":[128,180,193,177,237,166,224,208,177,241,114,161,162,151,220,142,193,193,124,130,132,141,190,239,213,131,172,127,184,157,215,122,181,240,218,205,239,217,234,158,180,131,194,171,177,110,117,114,217,123,248,189,198,127,182,121,224,111,151,170,188,150,137,231,222,186,139,175,178,246,150,154,129,216,144,198,228,183,173,129,157,199,186,232,172,157,246,239,214,161,132,208,187,224,164,177,175,224,219,235,112,241,243,179,208,196,131,207,182,233,191,162,173,197,190,182,231,196,196,143,250,174,138,135,164,204,235,192,114,179,215,127,185,213,250,213,153,217,176,190,119,167,118,208,113,206,200,236,159,218,168,159,156,183,121,203,215,209,179,219,174,220,129,188,217,250,166,157,112,236,182,144,189,243,238,147,165,115,160,134,245,174,238,157,150,184,174,134,134,248,199,165,117,119,162,112,170,224,247,217],
"Membership (Days)":[52,70,148,124,186,157,127,155,37,185,158,129,93,69,124,13,76,153,164,161,48,121,167,69,39,163,7,34,176,169,108,162,195,86,155,77,197,200,80,142,179,67,58,145,188,147,125,15,13,173,125,4,61,29,132,110,62,137,197,135,162,174,32,151,149,65,18,42,63,62,104,200,189,40,38,199,1,12,8,2,195,30,7,72,130,144,2,34,200,143,43,196,22,115,171,54,143,59,14,52,109,115,187,185,26,19,178,18,120,169,45,52,130,69,168,178,96,22,78,152,39,51,118,130,60,156,108,69,103,158,165,142,86,91,117,77,57,169,86,188,97,111,22,83,81,177,163,35,12,164,21,181,171,138,22,107,58,51,38,128,19,193,157,13,104,89,13,10,26,190,179,101,7,159,100,49,120,109,56,199,51,108,47,171,69,162,74,119,148,88,32,159,65,146,140,171,88,18,59,13]
})
training_data.head()
# Convert the membership days into weeks and then add this data as a new column to the DataFrame.
weeks = training_data["Membership (Days)"]/7
training_data["Membership (Weeks)"] = weeks
training_data.head()
# Create a Dataframe that has the Trainer, Weight, and Membership.
trainers_data = training_data[["Trainer", "Weight", "Membership (Days)", "Membership (Weeks)"]]
trainers_data
# Using groupby get the average weight and length membership for each trainer.
trainers_means = trainers_data.groupby(["Trainer"]).mean()
trainers_means
trainers_means.sort_values(by='Membership (Days)', ascending=False)
```
</details>
# Instructor Turn - 07 - Binning - 👩🏫🧑🏫
```
# Import Dependencies
import pandas as pd
# Load in file
final_exam_scores = "Resources/final_exam_scores.csv"
df = pd.read_csv(final_exam_scores)
df.head(10)
# Create the bins in which Data will be held
# Bins are 0, 59, 69, 79, 89, 100.
bins = [0, 59, 69, 79, 89, 100]
# Create the names for the five bins
group_names = ["F", "D", "C", "B", "A"]
df["Grade"] = pd.cut(df["Final Exam"], bins, labels=group_names)
df.head(10)
# Get the summary statistics of each letter grade.
df = df.groupby("Grade")
df.describe()
```
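One caution about `pd.cut`, illustrated with a handful of made-up scores rather than the exam file: the intervals are closed on the right and open on the left by default, so with these edges a score of exactly 0 falls outside every bin and becomes `NaN` unless `include_lowest=True` is passed.

```python
import pandas as pd

scores = pd.Series([0, 59, 60, 89, 90, 100])
bins = [0, 59, 69, 79, 89, 100]
labels = ["F", "D", "C", "B", "A"]

print(pd.cut(scores, bins, labels=labels))                       # 0 -> NaN
print(pd.cut(scores, bins, labels=labels, include_lowest=True))  # 0 -> F
```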
# Student Turn - 08 - Binning Ted - 👩🎓👨🎓
## Binning TED
### Instructions
* Read in the CSV file provided and print it to the screen.
* Find the minimum "views" and maximum "views".
* Using the minimum and maximum "views" as a reference, create 10 bins in which to slice the data.
* Create a new column called "View Group" and fill it with the values collected through your slicing.
* Group the DataFrame based upon the values within "View Group".
* Find out how many rows fall into each group before finding the averages for "comments", "duration", and "languages".
```
# Import Dependencies
import pandas as pd
# Create a path to the csv and read it into a Pandas DataFrame
csv_path = "Resources/ted_talks.csv"
ted_df = pd.read_csv(csv_path)
ted_df.head()
# Figure out the minimum and maximum views for a TED Talk
print(ted_df["views"].min())
print(ted_df["views"].max())
# Create bins in which to place values based upon TED Talk views
bins = [0, 199999, 399999, 599999, 799999, 999999,
1999999, 2999999, 3999999, 4999999, 50000000]
# Create labels for these bins
bin_labels = ["0 to 199k", "200k to 399k", "400k to 599k", "600k to 799k", "800k to 999k",
"1mil to 2mil", "2mil to 3mil", "3mil to 4mil", "4mil to 5mil", "5mil to 50mil"]
# Slice the data and place it into bins
# Place the data series into a new column inside of the DataFrame
ted_df["View ranges"] = pd.cut(ted_df["views"], bins, labels=bin_labels)
ted_df.head()
# Create a GroupBy object based upon "View ranges"
view_group = ted_df.groupby("View ranges")
# Find how many rows fall into each bin
ted_df["View ranges"].value_counts()
#print(view_group["comments"].count())
# Get the average of each column within the GroupBy object
view_group.mean()
#view_group[["comments", "duration", "languages"]].mean()
```
<details>
<summary><strong>Activity 08 Solution ✅</strong></summary>
```python
# Import Dependencies
import pandas as pd
# Create a path to the csv and read it into a Pandas DataFrame
csv_path = "Resources/ted_talks.csv"
ted_df = pd.read_csv(csv_path)
ted_df.head()
# Figure out the minimum and maximum views for a TED Talk
print(ted_df["views"].max())
print(ted_df["views"].min())
# Create bins in which to place values based upon TED Talk views
bins = [0, 199999, 399999, 599999, 799999, 999999,
1999999, 2999999, 3999999, 4999999, 50000000]
# Create labels for these bins
group_labels = ["0 to 199k", "200k to 399k", "400k to 599k", "600k to 799k", "800k to 999k", "1mil to 2mil",
"2mil to 3mil", "3mil to 4mil", "4mil to 5mil", "5mil to 50mil"]
# Slice the data and place it into bins
pd.cut(ted_df["views"], bins, labels=group_labels).head()
# Place the data series into a new column inside of the DataFrame
ted_df["View Group"] = pd.cut(ted_df["views"], bins, labels=group_labels)
ted_df.head()
# Create a GroupBy object based upon "View Group"
ted_group = ted_df.groupby("View Group")
# Find how many rows fall into each bin
print(ted_group["comments"].count())
# Get the average of each column within the GroupBy object
ted_group[["comments", "duration", "languages"]].mean()
```
</details>
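An optional alternative worth knowing, assuming the same TED file and columns: `pd.qcut` chooses the cut points itself so that each bin holds roughly the same number of talks, which avoids hand-picking edges when the distribution is unknown.

```python
import pandas as pd

ted_df = pd.read_csv("Resources/ted_talks.csv")

# 10 quantile-based bins; labels=False returns the decile number 0-9
ted_df["View Decile"] = pd.qcut(ted_df["views"], 10, labels=False)

ted_df.groupby("View Decile")[["comments", "duration", "languages"]].mean()
```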
|
github_jupyter
|
# Dependencies
import pandas as pd
import numpy as np
# Name of the CSV file
file = 'Resources/donors2008.csv'
# The correct encoding must be used to read the CSV in pandas
df = pd.read_csv(file, encoding="ISO-8859-1")
# Preview of the DataFrame
# Note that FIELD8 is likely a meaningless column
df.head()
# Delete extraneous column
del df['FIELD8']
df.head()
# Identify incomplete rows
df.count()
# Drop all rows with missing information
df = df.dropna(how='any')
# Verify dropped rows
df.count()
# The Amount column is the wrong data type. It should be numeric.
df.dtypes
# Use pd.to_numeric() method to convert the datatype of the Amount column
df['Amount'] = pd.to_numeric(df['Amount'])
# Verify that the Amount column datatype has been made numeric
df['Amount'].dtype
# Display an overview of the Employers column
df['Employer'].value_counts()
# Clean up Employer category. Replace 'Self Employed' and 'Self' with 'Self-Employed'
df['Employer'] = df['Employer'].replace(
{'Self Employed': 'Self-Employed', 'Self': 'Self-Employed'})
# Verify clean-up.
df['Employer'].value_counts()
df['Employer'] = df['Employer'].replace({'Not Employed': 'Unemployed'})
df['Employer'].value_counts()
# Display a statistical overview
# We can infer the maximum allowable individual contribution from 'max'
df.describe()
# Import Dependencies
import pandas as pd
# Reference the file where the CSV is located
crime_csv_path = "Resources/crime_incident_data2017.csv"
# Import the data into a Pandas DataFrame
crime_df = pd.read_csv(crime_csv_path)
crime_df.head()
# look for missing values
crime_df.count()
# drop null rows
new_crime_df = crime_df.dropna(how='any')
# verify counts
new_crime_df.count()
# Check to see if there are any values with mispelled or similar values in "Offense Type"
new_crime_df["Offense Type"].value_counts()
# Combining similar offenses together
new_crime_df['Offense Type'] = new_crime_df['Offense Type'].replace({'Assisting or Promoting Prostitution': 'Prostitution',
'Commercial Sex Acts': 'Prostitution' })
# Check to see if you comnbined similar offenses correctly in "Offense Type".
new_crime_df['Offense Type'].value_counts()
# Get the number of crimes against property, society, and person.
new_crime_df['Crime Against'].value_counts()
# Import Dependencies
import pandas as pd
# Reference the file where the CSV is located
crime_csv_path = "Resources/crime_incident_data2017.csv"
# Import the data into a Pandas DataFrame
crime_df = pd.read_csv(crime_csv_path)
crime_df
# look for missing values
crime_df.count()
# drop null rows
no_null_crime_df = crime_df.dropna(how='any')
# verify counts
no_null_crime_df.count()
# Check to see if there are any values with mispelled or similar values in "Offense Type"
no_null_crime_df["Offense Type"].value_counts()
# Combining similar offenses together
no_null_crime_df = no_null_crime_df.replace(
{"Commercial Sex Acts": "Prostitution", "Assisting or Promoting Prostitution": "Prostitution"})
no_null_crime_df
# Check to see if you comnbined similar offenses correctly in "Offense Type".
no_null_crime_df["Offense Type"].value_counts()
# Get the number of crimes against property, society, and person.
no_null_crime_df["Crime Against"].value_counts()
import pandas as pd
file = "Resources/sampleData.csv"
original_df = pd.read_csv(file)
original_df.head()
# Set new index to last_name
df = original_df.set_index("last_name")
df.head()
# Grab the data contained within the "Berry" row and the "Phone Number" column
berry_phone = df.loc["Berry", "Phone Number"]
print("Using Loc: " + berry_phone)
also_berry_phone = df.iloc[1, 2]
print("Using Iloc: " + also_berry_phone)
# Grab the first five rows of data and the columns from "id" to "Phone Number"
# The problem with using "last_name" as the index is that the values are not unique so duplicates are returned
# If there are duplicates and loc[] is being used, Pandas will return an error
richardson_to_morales = df.loc[["Richardson", "Berry", "Hudson",
"Mcdonald", "Morales"], ["id", "first_name", "Phone Number"]]
richardson_to_morales
# Using iloc[] will not find duplicates since a numeric index is always unique
also_richardson_to_morales = df.iloc[0:4, 0:3]
also_richardson_to_morales
# The following will select all rows for columns `first_name` and `Phone Number`
df.loc[:, ["first_name", "Phone Number"]].head()
# the following logic test/conditional statement returns a series of boolean values
named_billy = df["first_name"] == "Billy"
named_billy.head()
# Loc and Iloc also allow for conditional statments to filter rows of data
# using Loc on the logic test above only returns rows where the result is True
only_billys = df.loc[df["first_name"] == "Billy", :]
only_billys
# Multiple conditions can be set to narrow down or widen the filter
only_billy_and_peter = df.loc[(df["first_name"] == "Billy") | (
df["first_name"] == "Peter"), :]
only_billy_and_peter
# Dependencie
import pandas as pd
# Load in file
movie_file = "Resources/movie_scores.csv"
# Read and display the CSV with Pandas
movie_file_df = pd.read_csv(movie_file)
movie_file_df.head()
# List all the columns in the table
movie_file_df.columns
# We only want IMDb data, so create a new table that takes the Film
# and all the columns relating to IMDB
imdb_df = movie_file_df[["FILM", "IMDB", "IMDB_norm",
"IMDB_norm_round", "IMDB_user_vote_count"]]
imdb_df.head()
# We only like good movies, so find those that scored over 7, and ignore the norm rating
good_movies_df = movie_file_df.loc[movie_file_df["IMDB"] > 7, [
"FILM", "IMDB", "IMDB_user_vote_count"]]
good_movies_df.head()
# Find less popular movies--i.e., those with fewer than 20K votes
unknown_movies_df = good_movies_df.loc[good_movies_df["IMDB_user_vote_count"] < 20000, [
"FILM", "IMDB", "IMDB_user_vote_count"]]
unknown_movies_df.head()
# Dependencie
import pandas as pd
# Load in file
movie_file = "Resources/movie_scores.csv"
# Read and display the CSV with Pandas
movie_file_df = pd.read_csv(movie_file)
movie_file_df.head()
# List all the columns in the table
movie_file_df.columns
# We only want IMDb data, so create a new table that takes the Film and all the columns relating to IMDB
imdb_df = movie_file_df[["FILM", "IMDB", "IMDB_norm",
"IMDB_norm_round", "IMDB_user_vote_count"]]
imdb_df.head()
# We only like good movies, so find those that scored over 7, and ignore the norm rating
good_movies_df = movie_file_df.loc[movie_file_df["IMDB"] > 7, [
"FILM", "IMDB", "IMDB_user_vote_count"]]
good_movies_df.head()
# Find less popular movies--i.e., those with fewer than 20K votes
unknown_movies_df = good_movies_df.loc[good_movies_df["IMDB_user_vote_count"] < 20000, [
"FILM", "IMDB", "IMDB_user_vote_count"]]
unknown_movies_df.head()
# Import Dependencies
import pandas as pd
# Create a reference the CSV file desired
csv_path = "Resources/ufoSightings.csv"
# Read the CSV into a Pandas DataFrame
ufo_df = pd.read_csv(csv_path, low_memory = False)
# Print the first five rows of data to the screen
ufo_df.head()
# Remove the rows with missing data
clean_ufo_df = ufo_df.dropna(how="any")
clean_ufo_df.count()
clean_ufo_df.head()
clean_ufo_df.dtypes
# Converting the "duration (seconds)" column's values to numeric
converted_ufo = clean_ufo_df.copy()
converted_ufo["duration (seconds)"] = converted_ufo.loc[:, "duration (seconds)"].astype(float)
converted_ufo.head()
converted_ufo.dtypes
# Filter the data so that only those sightings in the US are in a DataFrame
usa_ufo_df = converted_ufo.loc[converted_ufo["country"] == "us", :]
usa_ufo_df.head()
# Count how many sightings have occured within each state
state_counts = usa_ufo_df["state"].value_counts()
state_counts.head()
# Using GroupBy in order to separate the data into fields according to "state" values
grouped_usa_df = usa_ufo_df.groupby(['state'])
# The object returned is a "GroupBy" object and cannot be viewed normally...
print(grouped_usa_df)
# In order to be visualized, a data function must be used...
grouped_usa_df.count().head(10)
grouped_usa_df["duration (seconds)"].sum()
# Since "duration (seconds)" was converted to a numeric time, it can now be summed up per state
state_duration = grouped_usa_df["duration (seconds)"].sum()
state_duration.head()
# Creating a new DataFrame using both duration and count
state_summary_table = pd.DataFrame({"Number of Sightings": state_counts,
"Total Visit Time": state_duration})
state_summary_table.head()
# It is also possible to group a DataFrame by multiple columns
# This returns an object with multiple indexes, however, which can be harder to deal with
grouped_international_data = converted_ufo.groupby(['country', 'state'])
grouped_international_data.count().head(20)
# Converting a GroupBy object into a DataFrame
international_duration = pd.DataFrame(
grouped_international_data["duration (seconds)"].sum())
international_duration.head(10)
# Import Dependencies
import pandas as pd
# A seriously gigantic DataFrame of individuals' names, their trainers, their weight, and their days as gym members
training_data = pd.DataFrame({
"Name":["Gino Walker","Hiedi Wasser","Kerrie Wetzel","Elizabeth Sackett","Jack Mitten","Madalene Wayman","Jamee Horvath","Arlena Reddin","Tula Levan","Teisha Dreier","Leslie Carrier","Arlette Hartson","Romana Merkle","Heath Viviani","Andres Zimmer","Allyson Osman","Yadira Caggiano","Jeanmarie Friedrichs","Leann Ussery","Bee Mom","Pandora Charland","Karena Wooten","Elizabet Albanese","Augusta Borjas","Erma Yadon","Belia Lenser","Karmen Sancho","Edison Mannion","Sonja Hornsby","Morgan Frei","Florencio Murphy","Christoper Hertel","Thalia Stepney","Tarah Argento","Nicol Canfield","Pok Moretti","Barbera Stallings","Muoi Kelso","Cicely Ritz","Sid Demelo","Eura Langan","Vanita An","Frieda Fuhr","Ernest Fitzhenry","Ashlyn Tash","Melodi Mclendon","Rochell Leblanc","Jacqui Reasons","Freeda Mccroy","Vanna Runk","Florinda Milot","Cierra Lecompte","Nancey Kysar","Latasha Dalton","Charlyn Rinaldi","Erline Averett","Mariko Hillary","Rosalyn Trigg","Sherwood Brauer","Hortencia Olesen","Delana Kohut","Geoffrey Mcdade","Iona Delancey","Donnie Read","Cesar Bhatia","Evia Slate","Kaye Hugo","Denise Vento","Lang Kittle","Sherry Whittenberg","Jodi Bracero","Tamera Linneman","Katheryn Koelling","Tonia Shorty","Misha Baxley","Lisbeth Goering","Merle Ladwig","Tammie Omar","Jesusa Avilla","Alda Zabala","Junita Dogan","Jessia Anglin","Peggie Scranton","Dania Clodfelter","Janis Mccarthy","Edmund Galusha","Tonisha Posey","Arvilla Medley","Briana Barbour","Delfina Kiger","Nia Lenig","Ricarda Bulow","Odell Carson","Nydia Clonts","Andree Resendez","Daniela Puma","Sherill Paavola","Gilbert Bloomquist","Shanon Mach","Justin Bangert","Arden Hokanson","Evelyne Bridge","Hee Simek","Ward Deangelis","Jodie Childs","Janis Boehme","Beaulah Glowacki","Denver Stoneham","Tarra Vinton","Deborah Hummell","Ulysses Neil","Kathryn Marques","Rosanna Dake","Gavin Wheat","Tameka Stoke","Janella Clear","Kaye Ciriaco","Suk Bloxham","Gracia Whaley","Philomena Hemingway","Claudette Vaillancourt","Olevia Piche","Trey Chiles","Idalia Scardina","Jenine Tremble","Herbert Krider","Alycia Schrock","Miss Weibel","Pearlene Neidert","Kina Callender","Charlotte Skelley","Theodora Harrigan","Sydney Shreffler","Annamae Trinidad","Tobi Mumme","Rosia Elliot","Debbra Putt","Rena Delosantos","Genna Grennan","Nieves Huf","Berry Lugo","Ayana Verdugo","Joaquin Mazzei","Doris Harmon","Patience Poss","Magaret Zabel","Marylynn Hinojos","Earlene Marcantel","Yuki Evensen","Rema Gay","Delana Haak","Patricia Fetters","Vinnie Elrod","Octavia Bellew","Burma Revard","Lakenya Kato","Vinita Buchner","Sierra Margulies","Shae Funderburg","Jenae Groleau","Louetta Howie","Astrid Duffer","Caron Altizer","Kymberly Amavisca","Mohammad Diedrich","Thora Wrinkle","Bethel Wiemann","Patria Millet","Eldridge Burbach","Alyson Eddie","Zula Hanna","Devin Goodwin","Felipa Kirkwood","Kurtis Kempf","Kasey Lenart","Deena Blankenship","Kandra Wargo","Sherrie Cieslak","Ron Atha","Reggie Barreiro","Daria Saulter","Tandra Eastman","Donnell Lucious","Talisha Rosner","Emiko Bergh","Terresa Launius","Margy Hoobler","Marylou Stelling","Lavonne Justice","Kala Langstaff","China Truett","Louanne Dussault","Thomasena Samaniego","Charlesetta Tarbell","Fatimah Lade","Malisa Cantero","Florencia Litten","Francina Fraise","Patsy London","Deloris Mclaughlin"],
"Trainer":['Bettyann Savory','Mariah Barberio','Gordon Perrine','Pa Dargan','Blanch Victoria','Aldo Byler','Aldo Byler','Williams Camire','Junie Ritenour','Gordon Perrine','Bettyann Savory','Mariah Barberio','Aldo Byler','Barton Stecklein','Bettyann Savory','Barton Stecklein','Gordon Perrine','Pa Dargan','Aldo Byler','Brittani Brin','Bettyann Savory','Phyliss Houk','Bettyann Savory','Junie Ritenour','Aldo Byler','Calvin North','Brittani Brin','Junie Ritenour','Blanch Victoria','Brittani Brin','Bettyann Savory','Blanch Victoria','Mariah Barberio','Bettyann Savory','Blanch Victoria','Brittani Brin','Junie Ritenour','Pa Dargan','Gordon Perrine','Phyliss Houk','Pa Dargan','Mariah Barberio','Phyliss Houk','Phyliss Houk','Calvin North','Williams Camire','Brittani Brin','Gordon Perrine','Bettyann Savory','Bettyann Savory','Pa Dargan','Phyliss Houk','Barton Stecklein','Blanch Victoria','Coleman Dunmire','Phyliss Houk','Blanch Victoria','Pa Dargan','Harland Coolidge','Calvin North','Bettyann Savory','Phyliss Houk','Bettyann Savory','Harland Coolidge','Gordon Perrine','Junie Ritenour','Harland Coolidge','Blanch Victoria','Mariah Barberio','Coleman Dunmire','Aldo Byler','Bettyann Savory','Gordon Perrine','Bettyann Savory','Barton Stecklein','Harland Coolidge','Aldo Byler','Aldo Byler','Pa Dargan','Junie Ritenour','Brittani Brin','Junie Ritenour','Gordon Perrine','Mariah Barberio','Mariah Barberio','Mariah Barberio','Bettyann Savory','Brittani Brin','Aldo Byler','Phyliss Houk','Blanch Victoria','Pa Dargan','Phyliss Houk','Brittani Brin','Barton Stecklein','Coleman Dunmire','Bettyann Savory','Bettyann Savory','Gordon Perrine','Blanch Victoria','Junie Ritenour','Phyliss Houk','Coleman Dunmire','Williams Camire','Harland Coolidge','Williams Camire','Aldo Byler','Harland Coolidge','Gordon Perrine','Brittani Brin','Coleman Dunmire','Calvin North','Phyliss Houk','Brittani Brin','Aldo Byler','Bettyann Savory','Brittani Brin','Gordon Perrine','Calvin North','Harland Coolidge','Coleman Dunmire','Harland Coolidge','Aldo Byler','Junie Ritenour','Blanch Victoria','Harland Coolidge','Blanch Victoria','Junie Ritenour','Harland Coolidge','Junie Ritenour','Gordon Perrine','Brittani Brin','Coleman Dunmire','Williams Camire','Junie Ritenour','Brittani Brin','Calvin North','Barton Stecklein','Barton Stecklein','Mariah Barberio','Coleman Dunmire','Bettyann Savory','Mariah Barberio','Pa Dargan','Barton Stecklein','Coleman Dunmire','Brittani Brin','Barton Stecklein','Pa Dargan','Barton Stecklein','Junie Ritenour','Bettyann Savory','Williams Camire','Pa Dargan','Calvin North','Williams Camire','Coleman Dunmire','Aldo Byler','Barton Stecklein','Coleman Dunmire','Blanch Victoria','Mariah Barberio','Mariah Barberio','Harland Coolidge','Barton Stecklein','Phyliss Houk','Pa Dargan','Bettyann Savory','Barton Stecklein','Harland Coolidge','Junie Ritenour','Pa Dargan','Mariah Barberio','Blanch Victoria','Williams Camire','Phyliss Houk','Phyliss Houk','Coleman Dunmire','Mariah Barberio','Gordon Perrine','Coleman Dunmire','Brittani Brin','Pa Dargan','Coleman Dunmire','Brittani Brin','Blanch Victoria','Coleman Dunmire','Gordon Perrine','Coleman Dunmire','Aldo Byler','Aldo Byler','Mariah Barberio','Williams Camire','Phyliss Houk','Aldo Byler','Williams Camire','Aldo Byler','Williams Camire','Coleman Dunmire','Phyliss Houk'],
"Weight":[128,180,193,177,237,166,224,208,177,241,114,161,162,151,220,142,193,193,124,130,132,141,190,239,213,131,172,127,184,157,215,122,181,240,218,205,239,217,234,158,180,131,194,171,177,110,117,114,217,123,248,189,198,127,182,121,224,111,151,170,188,150,137,231,222,186,139,175,178,246,150,154,129,216,144,198,228,183,173,129,157,199,186,232,172,157,246,239,214,161,132,208,187,224,164,177,175,224,219,235,112,241,243,179,208,196,131,207,182,233,191,162,173,197,190,182,231,196,196,143,250,174,138,135,164,204,235,192,114,179,215,127,185,213,250,213,153,217,176,190,119,167,118,208,113,206,200,236,159,218,168,159,156,183,121,203,215,209,179,219,174,220,129,188,217,250,166,157,112,236,182,144,189,243,238,147,165,115,160,134,245,174,238,157,150,184,174,134,134,248,199,165,117,119,162,112,170,224,247,217],
"Membership (Days)":[52,70,148,124,186,157,127,155,37,185,158,129,93,69,124,13,76,153,164,161,48,121,167,69,39,163,7,34,176,169,108,162,195,86,155,77,197,200,80,142,179,67,58,145,188,147,125,15,13,173,125,4,61,29,132,110,62,137,197,135,162,174,32,151,149,65,18,42,63,62,104,200,189,40,38,199,1,12,8,2,195,30,7,72,130,144,2,34,200,143,43,196,22,115,171,54,143,59,14,52,109,115,187,185,26,19,178,18,120,169,45,52,130,69,168,178,96,22,78,152,39,51,118,130,60,156,108,69,103,158,165,142,86,91,117,77,57,169,86,188,97,111,22,83,81,177,163,35,12,164,21,181,171,138,22,107,58,51,38,128,19,193,157,13,104,89,13,10,26,190,179,101,7,159,100,49,120,109,56,199,51,108,47,171,69,162,74,119,148,88,32,159,65,146,140,171,88,18,59,13]
})
training_data.head()
# Convert the membership days into weeks and then add this data as a new column to the DataFrame.
weeks = training_data["Membership (Days)"]/7
training_data["Membership (Weeks)"] = weeks
training_data.head()
# Create a Dataframe that has the Trainer, Weight, and Membership.
new_training_df = training_data[["Trainer", "Weight", "Membership (Days)"]]
new_training_df.head()
trainer_df = new_training_df.groupby(["Trainer"]).mean()
trainer_df
# Using groupby, get the average weight and length of membership for each trainer.
sum_df = pd.DataFrame(
new_training_df.groupby(["Trainer"]).sum())
sum_df
# Import Dependencies
import pandas as pd
# A seriously gigantic DataFrame of individuals' names, their trainers, their weight, and their days as gym members
training_data = pd.DataFrame({
"Name":["Gino Walker","Hiedi Wasser","Kerrie Wetzel","Elizabeth Sackett","Jack Mitten","Madalene Wayman","Jamee Horvath","Arlena Reddin","Tula Levan","Teisha Dreier","Leslie Carrier","Arlette Hartson","Romana Merkle","Heath Viviani","Andres Zimmer","Allyson Osman","Yadira Caggiano","Jeanmarie Friedrichs","Leann Ussery","Bee Mom","Pandora Charland","Karena Wooten","Elizabet Albanese","Augusta Borjas","Erma Yadon","Belia Lenser","Karmen Sancho","Edison Mannion","Sonja Hornsby","Morgan Frei","Florencio Murphy","Christoper Hertel","Thalia Stepney","Tarah Argento","Nicol Canfield","Pok Moretti","Barbera Stallings","Muoi Kelso","Cicely Ritz","Sid Demelo","Eura Langan","Vanita An","Frieda Fuhr","Ernest Fitzhenry","Ashlyn Tash","Melodi Mclendon","Rochell Leblanc","Jacqui Reasons","Freeda Mccroy","Vanna Runk","Florinda Milot","Cierra Lecompte","Nancey Kysar","Latasha Dalton","Charlyn Rinaldi","Erline Averett","Mariko Hillary","Rosalyn Trigg","Sherwood Brauer","Hortencia Olesen","Delana Kohut","Geoffrey Mcdade","Iona Delancey","Donnie Read","Cesar Bhatia","Evia Slate","Kaye Hugo","Denise Vento","Lang Kittle","Sherry Whittenberg","Jodi Bracero","Tamera Linneman","Katheryn Koelling","Tonia Shorty","Misha Baxley","Lisbeth Goering","Merle Ladwig","Tammie Omar","Jesusa Avilla","Alda Zabala","Junita Dogan","Jessia Anglin","Peggie Scranton","Dania Clodfelter","Janis Mccarthy","Edmund Galusha","Tonisha Posey","Arvilla Medley","Briana Barbour","Delfina Kiger","Nia Lenig","Ricarda Bulow","Odell Carson","Nydia Clonts","Andree Resendez","Daniela Puma","Sherill Paavola","Gilbert Bloomquist","Shanon Mach","Justin Bangert","Arden Hokanson","Evelyne Bridge","Hee Simek","Ward Deangelis","Jodie Childs","Janis Boehme","Beaulah Glowacki","Denver Stoneham","Tarra Vinton","Deborah Hummell","Ulysses Neil","Kathryn Marques","Rosanna Dake","Gavin Wheat","Tameka Stoke","Janella Clear","Kaye Ciriaco","Suk Bloxham","Gracia Whaley","Philomena Hemingway","Claudette Vaillancourt","Olevia Piche","Trey Chiles","Idalia Scardina","Jenine Tremble","Herbert Krider","Alycia Schrock","Miss Weibel","Pearlene Neidert","Kina Callender","Charlotte Skelley","Theodora Harrigan","Sydney Shreffler","Annamae Trinidad","Tobi Mumme","Rosia Elliot","Debbra Putt","Rena Delosantos","Genna Grennan","Nieves Huf","Berry Lugo","Ayana Verdugo","Joaquin Mazzei","Doris Harmon","Patience Poss","Magaret Zabel","Marylynn Hinojos","Earlene Marcantel","Yuki Evensen","Rema Gay","Delana Haak","Patricia Fetters","Vinnie Elrod","Octavia Bellew","Burma Revard","Lakenya Kato","Vinita Buchner","Sierra Margulies","Shae Funderburg","Jenae Groleau","Louetta Howie","Astrid Duffer","Caron Altizer","Kymberly Amavisca","Mohammad Diedrich","Thora Wrinkle","Bethel Wiemann","Patria Millet","Eldridge Burbach","Alyson Eddie","Zula Hanna","Devin Goodwin","Felipa Kirkwood","Kurtis Kempf","Kasey Lenart","Deena Blankenship","Kandra Wargo","Sherrie Cieslak","Ron Atha","Reggie Barreiro","Daria Saulter","Tandra Eastman","Donnell Lucious","Talisha Rosner","Emiko Bergh","Terresa Launius","Margy Hoobler","Marylou Stelling","Lavonne Justice","Kala Langstaff","China Truett","Louanne Dussault","Thomasena Samaniego","Charlesetta Tarbell","Fatimah Lade","Malisa Cantero","Florencia Litten","Francina Fraise","Patsy London","Deloris Mclaughlin"],
"Trainer":['Bettyann Savory','Mariah Barberio','Gordon Perrine','Pa Dargan','Blanch Victoria','Aldo Byler','Aldo Byler','Williams Camire','Junie Ritenour','Gordon Perrine','Bettyann Savory','Mariah Barberio','Aldo Byler','Barton Stecklein','Bettyann Savory','Barton Stecklein','Gordon Perrine','Pa Dargan','Aldo Byler','Brittani Brin','Bettyann Savory','Phyliss Houk','Bettyann Savory','Junie Ritenour','Aldo Byler','Calvin North','Brittani Brin','Junie Ritenour','Blanch Victoria','Brittani Brin','Bettyann Savory','Blanch Victoria','Mariah Barberio','Bettyann Savory','Blanch Victoria','Brittani Brin','Junie Ritenour','Pa Dargan','Gordon Perrine','Phyliss Houk','Pa Dargan','Mariah Barberio','Phyliss Houk','Phyliss Houk','Calvin North','Williams Camire','Brittani Brin','Gordon Perrine','Bettyann Savory','Bettyann Savory','Pa Dargan','Phyliss Houk','Barton Stecklein','Blanch Victoria','Coleman Dunmire','Phyliss Houk','Blanch Victoria','Pa Dargan','Harland Coolidge','Calvin North','Bettyann Savory','Phyliss Houk','Bettyann Savory','Harland Coolidge','Gordon Perrine','Junie Ritenour','Harland Coolidge','Blanch Victoria','Mariah Barberio','Coleman Dunmire','Aldo Byler','Bettyann Savory','Gordon Perrine','Bettyann Savory','Barton Stecklein','Harland Coolidge','Aldo Byler','Aldo Byler','Pa Dargan','Junie Ritenour','Brittani Brin','Junie Ritenour','Gordon Perrine','Mariah Barberio','Mariah Barberio','Mariah Barberio','Bettyann Savory','Brittani Brin','Aldo Byler','Phyliss Houk','Blanch Victoria','Pa Dargan','Phyliss Houk','Brittani Brin','Barton Stecklein','Coleman Dunmire','Bettyann Savory','Bettyann Savory','Gordon Perrine','Blanch Victoria','Junie Ritenour','Phyliss Houk','Coleman Dunmire','Williams Camire','Harland Coolidge','Williams Camire','Aldo Byler','Harland Coolidge','Gordon Perrine','Brittani Brin','Coleman Dunmire','Calvin North','Phyliss Houk','Brittani Brin','Aldo Byler','Bettyann Savory','Brittani Brin','Gordon Perrine','Calvin North','Harland Coolidge','Coleman Dunmire','Harland Coolidge','Aldo Byler','Junie Ritenour','Blanch Victoria','Harland Coolidge','Blanch Victoria','Junie Ritenour','Harland Coolidge','Junie Ritenour','Gordon Perrine','Brittani Brin','Coleman Dunmire','Williams Camire','Junie Ritenour','Brittani Brin','Calvin North','Barton Stecklein','Barton Stecklein','Mariah Barberio','Coleman Dunmire','Bettyann Savory','Mariah Barberio','Pa Dargan','Barton Stecklein','Coleman Dunmire','Brittani Brin','Barton Stecklein','Pa Dargan','Barton Stecklein','Junie Ritenour','Bettyann Savory','Williams Camire','Pa Dargan','Calvin North','Williams Camire','Coleman Dunmire','Aldo Byler','Barton Stecklein','Coleman Dunmire','Blanch Victoria','Mariah Barberio','Mariah Barberio','Harland Coolidge','Barton Stecklein','Phyliss Houk','Pa Dargan','Bettyann Savory','Barton Stecklein','Harland Coolidge','Junie Ritenour','Pa Dargan','Mariah Barberio','Blanch Victoria','Williams Camire','Phyliss Houk','Phyliss Houk','Coleman Dunmire','Mariah Barberio','Gordon Perrine','Coleman Dunmire','Brittani Brin','Pa Dargan','Coleman Dunmire','Brittani Brin','Blanch Victoria','Coleman Dunmire','Gordon Perrine','Coleman Dunmire','Aldo Byler','Aldo Byler','Mariah Barberio','Williams Camire','Phyliss Houk','Aldo Byler','Williams Camire','Aldo Byler','Williams Camire','Coleman Dunmire','Phyliss Houk'],
"Weight":[128,180,193,177,237,166,224,208,177,241,114,161,162,151,220,142,193,193,124,130,132,141,190,239,213,131,172,127,184,157,215,122,181,240,218,205,239,217,234,158,180,131,194,171,177,110,117,114,217,123,248,189,198,127,182,121,224,111,151,170,188,150,137,231,222,186,139,175,178,246,150,154,129,216,144,198,228,183,173,129,157,199,186,232,172,157,246,239,214,161,132,208,187,224,164,177,175,224,219,235,112,241,243,179,208,196,131,207,182,233,191,162,173,197,190,182,231,196,196,143,250,174,138,135,164,204,235,192,114,179,215,127,185,213,250,213,153,217,176,190,119,167,118,208,113,206,200,236,159,218,168,159,156,183,121,203,215,209,179,219,174,220,129,188,217,250,166,157,112,236,182,144,189,243,238,147,165,115,160,134,245,174,238,157,150,184,174,134,134,248,199,165,117,119,162,112,170,224,247,217],
"Membership (Days)":[52,70,148,124,186,157,127,155,37,185,158,129,93,69,124,13,76,153,164,161,48,121,167,69,39,163,7,34,176,169,108,162,195,86,155,77,197,200,80,142,179,67,58,145,188,147,125,15,13,173,125,4,61,29,132,110,62,137,197,135,162,174,32,151,149,65,18,42,63,62,104,200,189,40,38,199,1,12,8,2,195,30,7,72,130,144,2,34,200,143,43,196,22,115,171,54,143,59,14,52,109,115,187,185,26,19,178,18,120,169,45,52,130,69,168,178,96,22,78,152,39,51,118,130,60,156,108,69,103,158,165,142,86,91,117,77,57,169,86,188,97,111,22,83,81,177,163,35,12,164,21,181,171,138,22,107,58,51,38,128,19,193,157,13,104,89,13,10,26,190,179,101,7,159,100,49,120,109,56,199,51,108,47,171,69,162,74,119,148,88,32,159,65,146,140,171,88,18,59,13]
})
training_data.head()
# Convert the membership days into weeks and then add this data as a new column to the DataFrame.
weeks = training_data["Membership (Days)"]/7
training_data["Membership (Weeks)"] = weeks
training_data.head()
# Create a Dataframe that has the Trainer, Weight, and Membership.
trainers_data = training_data[["Trainer", "Weight", "Membership (Days)", "Membership (Weeks)"]]
trainers_data
# Using groupby, get the average weight and length of membership for each trainer.
trainers_means = trainers_data.groupby(["Trainer"]).mean()
trainers_means
trainers_means.sort_values(by='Membership (Days)', ascending=False)
# Import Dependencies
import pandas as pd
# Load in file
final_exam_scores = "resources/final_exam_scores.csv"
df = pd.read_csv(final_exam_scores)
df.head(10)
# Create the bins in which Data will be held
# Bins are 0, 59, 69, 79, 89, 100.
bins = [0, 59, 69, 79, 89, 100]
# Create the names for the five bins
group_names = ["F", "D", "C", "B", "A"]
df["Grade"] = pd.cut(df["Final Exam"], bins, labels=group_names)
df.head(10)
# Get the summary statistics of each letter grade.
df = df.groupby("Grade")
df.describe()
# Import Dependencies
import pandas as pd
# Create a path to the csv and read it into a Pandas DataFrame
csv_path = "Resources/ted_talks.csv"
ted_df = pd.read_csv(csv_path)
ted_df.head()
# Figure out the minimum and maximum views for a TED Talk
print(ted_df["views"].min())
print(ted_df["views"].max())
# Create bins in which to place values based upon TED Talk views
bins = [0, 199999, 399999, 599999, 799999, 999999,
1999999, 2999999, 3999999, 4999999, 50000000]
# Create labels for these bins
bin_labels = ["0 to 199k", "200k to 399k", "400k to 599k", "600k to 799k", "800k to 999k",
"1mil to 2mil", "2mil to 3mil", "3mil to 4mil", "4mil to 5mil", "5mil to 50mil"]
# Slice the data and place it into bins
# Place the data series into a new column inside of the DataFrame
ted_df["View ranges"] = pd.cut(ted_df["views"], bins, labels=bin_labels)
ted_df.head()
# Create a GroupBy object based upon "View ranges"
view_group = ted_df.groupby("View ranges")
# Find how many rows fall into each bin
ted_df["View ranges"].value_counts()
#print(view_group["comments"].count())
# Get the average of each column within the GroupBy object
view_group.mean()
#view_group[["comments", "duration", "languages"]].mean()
# Import Dependencies
import pandas as pd
# Create a path to the csv and read it into a Pandas DataFrame
csv_path = "Resources/ted_talks.csv"
ted_df = pd.read_csv(csv_path)
ted_df.head()
# Figure out the minimum and maximum views for a TED Talk
print(ted_df["views"].max())
print(ted_df["views"].min())
# Create bins in which to place values based upon TED Talk views
bins = [0, 199999, 399999, 599999, 799999, 999999,
1999999, 2999999, 3999999, 4999999, 50000000]
# Create labels for these bins
group_labels = ["0 to 199k", "200k to 399k", "400k to 599k", "600k to 799k", "800k to 999k", "1mil to 2mil",
"2mil to 3mil", "3mil to 4mil", "4mil to 5mil", "5mil to 50mil"]
# Slice the data and place it into bins
pd.cut(ted_df["views"], bins, labels=group_labels).head()
# Place the data series into a new column inside of the DataFrame
ted_df["View Group"] = pd.cut(ted_df["views"], bins, labels=group_labels)
ted_df.head()
# Create a GroupBy object based upon "View Group"
ted_group = ted_df.groupby("View Group")
# Find how many rows fall into each bin
print(ted_group["comments"].count())
# Get the average of each column within the GroupBy object
ted_group[["comments", "duration", "languages"]].mean()
| 0.680879 | 0.812607 |
```
import pandas as pd
df1 = pd.read_csv('2-fft-normal-n-0-3-data.csv')
df2 = pd.read_csv('2-fft-normal-n-0-5-data.csv')
df3 = pd.read_csv('2-fft-normal-n-0-10-data.csv')
df4 = pd.read_csv('2-fft-normal-n-0-12-data.csv')
df5 = pd.read_csv('2-fft-normal-n-0-15-data.csv')
df1 = df1.loc[df1['router'] == 1]
df2 = df2.loc[df2['router'] == 1]
df3 = df3.loc[df3['router'] == 1]
df4 = df4.loc[df4['router'] == 1]
df5 = df5.loc[df5['router'] == 1]
df1 = df1.drop(columns=['router'])
df2 = df2.drop(columns=['router'])
df3 = df3.drop(columns=['router'])
df4 = df4.drop(columns=['router'])
df5 = df5.drop(columns=['router'])
df1.to_csv('2-fft-normal-n-0-3-data-r1-good.csv',index=False)
df2.to_csv('2-fft-normal-n-0-5-data-r1-good.csv',index=False)
df3.to_csv('2-fft-normal-n-0-10-data-r1-good.csv',index=False)
df4.to_csv('2-fft-normal-n-0-12-data-r1-good.csv',index=False)
df5.to_csv('2-fft-normal-n-0-15-data-r1-good.csv',index=False)
df1 = pd.read_csv('2-fft-normal-n-0-3-data-r1-good.csv')
df2 = pd.read_csv('2-fft-normal-n-0-5-data-r1-good.csv')
df3 = pd.read_csv('2-fft-normal-n-0-10-data-r1-good.csv')
df4 = pd.read_csv('2-fft-normal-n-0-12-data-r1-good.csv')
df5 = pd.read_csv('2-fft-normal-n-0-15-data-r1-good.csv')
def timecount(df):
timearr = []
interval = 99
count = 0
for index, row in df.iterrows():
if row["timestamp"]<=interval:
count+=1
else:
timearr.append([interval+1,count])
count=1
interval+=100
timearr.append([interval+1,count])
return timearr
def maxcount(timearr,df):
countarr = []
increarr = []
maxarr = []
for i in range(len(timearr)):
for cnt in range(timearr[i][1],0,-1):
countarr.append(cnt)
maxarr.append(timearr[i][1])
increment = timearr[i][1] - cnt + 1
increarr.append(increment)
df = df.assign(packet_count_decr=countarr)
df = df.assign(packet_count_incr=increarr)
df = df.assign(max_packet_count=maxarr)
return df
df1 = maxcount(timecount(df1),df1)
df2 = maxcount(timecount(df2),df2)
df3 = maxcount(timecount(df3),df3)
df4 = maxcount(timecount(df4),df4)
df5 = maxcount(timecount(df5),df5)
df1
def rename(df):
df['traversal_id'] = df['traversal_id']+1
df["packet_count_index"] = df["packet_count_decr"]*df["packet_count_incr"]
df["packet_max_index"] = df["packet_count_index"]*df["max_packet_count"]
df["port_index"] = df["outport"]*df["inport"]
df["cache_coherence_flit_index"] = df["cache_coherence_type"]*df["flit_id"]
df["flit_index"] = df["cache_coherence_flit_index"]*df["flit_type"]
df["traversal_index"] = df["flit_index"]*df["traversal_id"]
df["cache_coherence_vnet_index"] = df["cache_coherence_type"]*df["vnet"]
df["vnet_vc_index"] = df["vnet"]*df["vc"]
df["vnet_vc_cc_index"] = df["vnet"]*df["cache_coherence_vnet_index"]
df.head(50)
rename(df1)
rename(df2)
rename(df3)
rename(df4)
rename(df5)
df1['target'] = 1
df2['target'] = 1
df3['target'] = 1
df4['target'] = 1
df5['target'] = 1
df1
df2
df3
df4
df5
df1.dtypes
df1.to_csv('2-fft-normal-n-0-3-data-r1-good.csv',index=False)
df2.to_csv('2-fft-normal-n-0-5-data-r1-good.csv',index=False)
df3.to_csv('2-fft-normal-n-0-10-data-r1-good.csv',index=False)
df4.to_csv('2-fft-normal-n-0-12-data-r1-good.csv',index=False)
df5.to_csv('2-fft-normal-n-0-15-data-r1-good.csv',index=False)
```
|
github_jupyter
|
import pandas as pd
df1 = pd.read_csv('2-fft-normal-n-0-3-data.csv')
df2 = pd.read_csv('2-fft-normal-n-0-5-data.csv')
df3 = pd.read_csv('2-fft-normal-n-0-10-data.csv')
df4 = pd.read_csv('2-fft-normal-n-0-12-data.csv')
df5 = pd.read_csv('2-fft-normal-n-0-15-data.csv')
df1 = df1.loc[df1['router'] == 1]
df2 = df2.loc[df2['router'] == 1]
df3 = df3.loc[df3['router'] == 1]
df4 = df4.loc[df4['router'] == 1]
df5 = df5.loc[df5['router'] == 1]
df1 = df1.drop(columns=['router'])
df2 = df2.drop(columns=['router'])
df3 = df3.drop(columns=['router'])
df4 = df4.drop(columns=['router'])
df5 = df5.drop(columns=['router'])
df1.to_csv('2-fft-normal-n-0-3-data-r1-good.csv',index=False)
df2.to_csv('2-fft-normal-n-0-5-data-r1-good.csv',index=False)
df3.to_csv('2-fft-normal-n-0-10-data-r1-good.csv',index=False)
df4.to_csv('2-fft-normal-n-0-12-data-r1-good.csv',index=False)
df5.to_csv('2-fft-normal-n-0-15-data-r1-good.csv',index=False)
df1 = pd.read_csv('2-fft-normal-n-0-3-data-r1-good.csv')
df2 = pd.read_csv('2-fft-normal-n-0-5-data-r1-good.csv')
df3 = pd.read_csv('2-fft-normal-n-0-10-data-r1-good.csv')
df4 = pd.read_csv('2-fft-normal-n-0-12-data-r1-good.csv')
df5 = pd.read_csv('2-fft-normal-n-0-15-data-r1-good.csv')
def timecount(df):
timearr = []
interval = 99
count = 0
for index, row in df.iterrows():
if row["timestamp"]<=interval:
count+=1
else:
timearr.append([interval+1,count])
count=1
interval+=100
timearr.append([interval+1,count])
return timearr
def maxcount(timearr,df):
countarr = []
increarr = []
maxarr = []
for i in range(len(timearr)):
for cnt in range(timearr[i][1],0,-1):
countarr.append(cnt)
maxarr.append(timearr[i][1])
increment = timearr[i][1] - cnt + 1
increarr.append(increment)
df = df.assign(packet_count_decr=countarr)
df = df.assign(packet_count_incr=increarr)
df = df.assign(max_packet_count=maxarr)
return df
df1 = maxcount(timecount(df1),df1)
df2 = maxcount(timecount(df2),df2)
df3 = maxcount(timecount(df3),df3)
df4 = maxcount(timecount(df4),df4)
df5 = maxcount(timecount(df5),df5)
df1
def rename(df):
df['traversal_id'] = df['traversal_id']+1
df["packet_count_index"] = df["packet_count_decr"]*df["packet_count_incr"]
df["packet_max_index"] = df["packet_count_index"]*df["max_packet_count"]
df["port_index"] = df["outport"]*df["inport"]
df["cache_coherence_flit_index"] = df["cache_coherence_type"]*df["flit_id"]
df["flit_index"] = df["cache_coherence_flit_index"]*df["flit_type"]
df["traversal_index"] = df["flit_index"]*df["traversal_id"]
df["cache_coherence_vnet_index"] = df["cache_coherence_type"]*df["vnet"]
df["vnet_vc_index"] = df["vnet"]*df["vc"]
df["vnet_vc_cc_index"] = df["vnet"]*df["cache_coherence_vnet_index"]
df.head(50)
rename(df1)
rename(df2)
rename(df3)
rename(df4)
rename(df5)
df1['target'] = 1
df2['target'] = 1
df3['target'] = 1
df4['target'] = 1
df5['target'] = 1
df1
df2
df3
df4
df5
df1.dtypes
df1.to_csv('2-fft-normal-n-0-3-data-r1-good.csv',index=False)
df2.to_csv('2-fft-normal-n-0-5-data-r1-good.csv',index=False)
df3.to_csv('2-fft-normal-n-0-10-data-r1-good.csv',index=False)
df4.to_csv('2-fft-normal-n-0-12-data-r1-good.csv',index=False)
df5.to_csv('2-fft-normal-n-0-15-data-r1-good.csv',index=False)
| 0.138345 | 0.141015 |
```
import numpy as np
from tqdm import tqdm
from matplotlib import pyplot as plt
import matplotlib.lines as mlines
import matplotlib
%matplotlib inline
from ovejero import model_trainer, data_tools, bnn_inference
import corner
import os
def NOTIMPLEMENTED():
raise NotImplementedError('Must specify config/save path')
```
# Testing the Performance of a Model That Has Been Fit
__Author:__ Sebastian Wagner-Carena
__Last Run:__ 08/04/2020
__Goals:__ Learn how to test the performance of a trained model on the validation set.
__Before running this notebook:__ Run the Train_Toy_Model notebook to understand how to train a model. Then train a model with whatever configuration you want. You will have to add the path to the config file in this notebook.
To start, all we have to do is load up our model weights and run it on the validation set. Thankfully, that's pretty easy to do with the BNN inference class. If you don't have a GPU, generating samples for the full validation set can be time consuming (30-40 minutes for 1000 samples). However, by specifying a save path for the samples we only need to do this once.
```
# First specify the config path
config_path = NOTIMPLEMENTED()
# Check that the config has what you need
cfg = model_trainer.load_config(config_path)
# The InferenceClass will do all the heavy lifting of preparing the model from the configuration file,
# initializing the validation dataset, and providing outputs correctly marginalized over the BNN uncertainties.
bnn_infer = bnn_inference.InferenceClass(cfg)
# Now we just have to ask the InferenceClass to spin up some samples from our BNN. The more samples, the more
# accurate our plots and metrics will be. The right value to use unfortunately requires a bit of trial and error.
# 1000 is a good starting point though.
num_samples = 1000
sample_save_dir = NOTIMPLEMENTED()
bnn_infer.gen_samples(num_samples,sample_save_dir=sample_save_dir)
```
Now that we have set up our infrastructure, the first thing we want to do is inspect the statistics of our network's performance over the validation set.
```
bnn_infer.report_stats()
```
We can also inspect a coverage plot of our parameters. If our model is performing well, we expect our data to roughly follow the 68-95-99.7 rule.
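As a rough illustration of what the coverage plot is checking (this is just a toy sketch with made-up predictions, not the ovejero implementation), we can count how many truths land within 1, 2, and 3 predicted standard deviations:

```
# Toy sketch of the idea behind a coverage plot (illustrative only).
import numpy as np
rng = np.random.default_rng(0)
truth = rng.normal(size=2000)                          # pretend true parameter values
pred_mean, pred_std = np.zeros(2000), np.ones(2000)    # pretend BNN predictions
z = np.abs(truth - pred_mean) / pred_std
print([np.mean(z <= k) for k in (1, 2, 3)])            # ~[0.68, 0.95, 0.997] if well calibrated
```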
```
bnn_infer.gen_coverage_plots()
```
Another good check is to see the posterior of some example images.
```
image_index = 5
bnn_infer.plot_posterior_contours(image_index)
```
It's important to understand where our uncertainty is coming from. We can inspect whether our uncertainty is dominated by aleatoric or epistemic sources.
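As a sketch of the usual decomposition (not necessarily how `comp_al_ep_unc` computes it internally; the array names below are hypothetical), the total predictive variance splits into the mean of the predicted aleatoric variances plus the variance of the predicted means across BNN draws:

```
# Standard (diagonal) split of predictive uncertainty (a sketch, not the ovejero code).
import numpy as np

def decompose_uncertainty(mu_samps, al_var_samps):
    # mu_samps: (n_draws, n_images, n_params) predicted means from each BNN draw (hypothetical name)
    # al_var_samps: same shape, predicted aleatoric variances from each draw (hypothetical name)
    aleatoric = np.mean(al_var_samps, axis=0)   # average data noise the network predicts
    epistemic = np.var(mu_samps, axis=0)        # spread of the predicted means across draws
    return aleatoric, epistemic, aleatoric + epistemic  # total is roughly the sum of the two
```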
```
bnn_infer.comp_al_ep_unc()
```
In the end, we want our network's posterior to be well calibrated. That means the truth should be a representative draw from the distribution we're predicting. The exact sampling that goes into the calibration plot is complicated, but the x axis represents how much of the data the model expects to fall within a certain region of our posterior, and the y axis represents how much data actually falls within that region. Ideally this would be a straight line (y=x), but in practice our model is likely to be overconfident, underconfident, or some combination of both. The lower right hand corner of our plot represents overconfidence, and the upper right hand corner represents underconfidence.
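To make the idea concrete, here is a minimal 1D sketch of how a calibration curve can be built from posterior samples; it is only an illustration, not the exact procedure `plot_calibration` uses:

```
# Minimal 1D calibration-curve sketch (illustration only, not the ovejero procedure).
import numpy as np

def calibration_curve(samples, truths, percentages):
    # samples: (n_draws, n_images) posterior samples for one parameter
    # truths: (n_images,) true values
    pit = np.mean(samples < truths[None, :], axis=0)  # fraction of samples below the truth
    coverage = np.abs(pit - 0.5) * 2                  # smallest central credible interval containing the truth
    # x axis: requested interval size p; y axis: fraction of truths actually inside it
    return [np.mean(coverage <= p) for p in percentages]

# Perfectly calibrated toy case: the curve should hug y = x.
rng = np.random.default_rng(0)
print(calibration_curve(rng.normal(size=(1000, 500)), rng.normal(size=500), [0.1, 0.5, 0.9]))
```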
```
color_map = ["#377eb8", "#4daf4a"]
n_perc_points = 30
fig = bnn_infer.plot_calibration(color_map=color_map,n_perc_points=n_perc_points)
```
## Understanding the Calibration Plot
Throughout our paper we argue that the calibration plot is the best metric to assess the quality of the BNN posterior. Here, we include a few examples to give a better feel for the calibration plot. We focus on toy 2D models since those are easy to visualize and conceptualize. We can start with a biased 2D posterior prediction.
```
# First we'll make a class to generate our comparison
matplotlib.rcParams.update({'font.size': 13})
def plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws,
fit_guass_data=False):
bnn_toy = bnn_inference.InferenceClass(cfg)
# We generate our toy data
data = np.random.multivariate_normal(data_mean,data_cov,(toy_batch_size))
# Now we generate our posterior means and covariances
post_samples = np.random.multivariate_normal(post_mean,post_cov,(n_draws,toy_batch_size))
# We change our bnn inference instance to have these values
bnn_toy.samples_init = True
bnn_toy.y_pred = np.mean(post_samples,axis=0)
bnn_toy.predict_samps = post_samples
bnn_toy.y_test = data
# We can visualize the true data and the posterior, and compare that to the calibration plot.
color_map=["#377eb8", "#4daf4a"]
fig = corner.corner(post_samples.reshape(-1,2),bins=20,labels=['x','y'],show_titles=False, plot_datapoints=False,
label_kwargs=dict(fontsize=15),levels=[0.68,0.95],dpi=200,
color=color_map[1],fill_contours=True,range=[[-6,6],[-6,6]])
fig.axes[2].plot(data[:,0],data[:,1],'.',c=color_map[0],alpha=0.1)
post_line = mlines.Line2D([], [], color=color_map[0], label='True Posterior')
data_line = mlines.Line2D([], [], color=color_map[1], label='Inferred Posterior')
plt.legend(handles=[post_line,data_line], bbox_to_anchor=(0.05, 1.0, 1., .0), loc=4,fontsize=12)
plt.show()
cal_fig = bnn_toy.plot_calibration(n_perc_points=30,title='',
legend=['Perfect Calibration','Inferred Posterior Calibration'])
return (fig,cal_fig)
# We start with our offset posterior
data_mean = np.zeros(2)
data_cov = np.eye(2)
toy_batch_size = 10000
n_draws = 1000
post_mean = np.ones(2)*2
post_cov=np.eye(2)
post_fig, cal_fig = plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws)
post_fig.savefig('../paper/figures/appendix/offset_corn.pdf')
cal_fig.savefig('../paper/figures/appendix/offset_cal.pdf')
```
The posterior we're predicting is offset from the truth, so our model is consistently overconfident. We can repeat the exercise with a posterior that is correctly centered but has a much tighter contour. We still expect our model to be overconfident.
```
data_mean = np.zeros(2)
data_cov = np.eye(2)
toy_batch_size = 10000
n_draws = 1000
post_mean = np.zeros(2)
post_cov=np.eye(2)*0.3
_ = plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws = 1000)
```
Once again, our model is overconfident. We can similarly see what happens when our model is underconfident by expanding our contours.
```
data_mean = np.zeros(2)
data_cov = np.eye(2)
toy_batch_size = 10000
n_draws = 1000
post_mean = np.zeros(2)
post_cov=np.eye(2)*3
post_fig, cal_fig = plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws)
post_fig.savefig('../paper/figures/appendix/underconf_corn.pdf')
cal_fig.savefig('../paper/figures/appendix/underconf_cal.pdf')
```
The model posterior here is underconfident - almost 90% of the data falls within the 1 sigma contour. We can look at a more realistic example - a Gaussian posterior with no covariance trying to fit data with covariance.
```
# We start with our offset posterior
data_mean = np.zeros(2)
data_cov = np.array([[1,0.99],[0.99,1]])
toy_batch_size = 10000
n_draws = 1000
post_mean = np.zeros(2)
post_cov=np.diag(np.std(np.random.multivariate_normal(data_mean,data_cov,(toy_batch_size)),axis=0))
_ = plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws)
```
Our network mostly comes across as overconfident here - it's not capturing the extreme covariance in the data, causing the network's contours to assign too little probabilistic weight to the tails.
Another issue our network may have is that the posterior we pick is not sufficiently multimodal to capture the true distribution of the data (or the multimodality is poorly tuned). We can see what this looks like by fitting a full covariance matrix posterior to multimodal data.
```
# First we'll make a class to generate our comparison
def plot_toy_model_calibration_gm(data_means,data_covs,post_mean,post_cov,toy_batch_size,ps,n_draws,
fit_guass_data=False):
bnn_toy = bnn_inference.InferenceClass(cfg)
# We generate our toy data
data = []
for dmi in range(len(data_means)):
data.append(np.random.multivariate_normal(data_means[dmi],data_covs[dmi],(int(toy_batch_size*ps[dmi]))))
data = np.concatenate(data,axis=0)
if fit_guass_data == True:
post_mean = np.mean(data,axis=0)
post_cov=np.diag(np.std(data,axis=0))
# Now we generate our posterior means and covariances
post_samples = np.random.multivariate_normal(post_mean,post_cov,(n_draws,toy_batch_size))
# We change our bnn inference instance to have these values
bnn_toy.samples_init = True
bnn_toy.y_pred = np.mean(post_samples,axis=0)
bnn_toy.predict_samps = post_samples
bnn_toy.y_test = data
# We can visualize the true data and the posterior, and compare that to the calibration plot.
color_map=["#377eb8", "#4daf4a"]
fig = corner.corner(post_samples.reshape((-1,2)),bins=20,labels=['x','y'],show_titles=False,
plot_datapoints=False,label_kwargs=dict(fontsize=15),levels=[0.68,0.95],dpi=1600,
color=color_map[1],fill_contours=True,range=[[-6,6],[-6,6]])
fig.axes[2].plot(data[:,0],data[:,1],'.',c=color_map[0],alpha=0.1)
post_line = mlines.Line2D([], [], color=color_map[0], label='True Posterior')
data_line = mlines.Line2D([], [], color=color_map[1], label='Inferred Posterior')
plt.legend(handles=[data_line,post_line], bbox_to_anchor=(0.05, 1.0, 1., .0), loc=4,fontsize=12.0)
plt.show()
cal_fig = bnn_toy.plot_calibration(n_perc_points=30,title='',
legend=['Perfect Calibration','Inferred Posterior Calibration'])
return (fig,cal_fig)
# Estimate a single Gaussian from the multimodal data.
data_means = [np.ones(2)*3,np.zeros(2)]
data_covs = [np.array([[0.4,0],[0,0.4]]),np.array([[0.4,0],[0,0.4]])]
ps = [0.9,0.1]
toy_batch_size = 10000
n_draws = 1000
data = []
for dmi in range(len(data_means)):
data.append(np.random.multivariate_normal(data_means[dmi],data_covs[dmi],(toy_batch_size//len(
data_mean))))
data = np.concatenate(data,axis=0)
post_mean = np.mean(data,axis=0)
post_cov=np.diag(np.std(data,axis=0))
post_fig, cal_fig = plot_toy_model_calibration_gm(data_means,data_covs,post_mean,post_cov,toy_batch_size,
ps,n_draws,fit_guass_data=True)
post_fig.savefig('../paper/figures/appendix/biv_corn.pdf')
cal_fig.savefig('../paper/figures/appendix/biv_cal.pdf')
```
Interestingly, the multimodal data leads to both under- and overconfidence from our network. In the interior region, corresponding to the principal mode, the toy prediction has slightly too large covariances. In the tails, where our second mode becomes relevant, our single Gaussian prediction is suddenly very underconfident (since it assigns almost no weight to the second mode).
|
github_jupyter
|
import numpy as np
from tqdm import tqdm
from matplotlib import pyplot as plt
import matplotlib.lines as mlines
import matplotlib
%matplotlib inline
from ovejero import model_trainer, data_tools, bnn_inference
import corner
import os
def NOTIMPLEMENTED():
raise NotImplementedError('Must specify config/save path')
# First specify the config path
config_path = NOTIMPLEMENTED()
# Check that the config has what you need
cfg = model_trainer.load_config(config_path)
# The InferenceClass will do all the heavy lifting of preparing the model from the configuration file,
# initializing the validation dataset, and providing outputs correctly marginalized over the BNN uncertainties.
bnn_infer = bnn_inference.InferenceClass(cfg)
# Now we just have to ask the InferenceClass to spin up some samples from our BNN. The more samples, the more
# accurate our plots and metrics will be. The right value to use unfortunately requires a bit of trial and error.
# 1000 is a good starting point though.
num_samples = 1000
sample_save_dir = NOTIMPLEMENTED()
bnn_infer.gen_samples(num_samples,sample_save_dir=sample_save_dir)
bnn_infer.report_stats()
bnn_infer.gen_coverage_plots()
image_index = 5
bnn_infer.plot_posterior_contours(image_index)
bnn_infer.comp_al_ep_unc()
color_map = ["#377eb8", "#4daf4a"]
n_perc_points = 30
fig = bnn_infer.plot_calibration(color_map=color_map,n_perc_points=n_perc_points)
# First we'll make a class to generate our comparison
matplotlib.rcParams.update({'font.size': 13})
def plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws,
fit_guass_data=False):
bnn_toy = bnn_inference.InferenceClass(cfg)
# We generate our toy data
data = np.random.multivariate_normal(data_mean,data_cov,(toy_batch_size))
# Now we generate our posterior means and covariances
post_samples = np.random.multivariate_normal(post_mean,post_cov,(n_draws,toy_batch_size))
# We change our bnn inference instance to have these values
bnn_toy.samples_init = True
bnn_toy.y_pred = np.mean(post_samples,axis=0)
bnn_toy.predict_samps = post_samples
bnn_toy.y_test = data
# We can visualize the true data and the posterior, and compare that to the calibration plot.
color_map=["#377eb8", "#4daf4a"]
fig = corner.corner(post_samples.reshape(-1,2),bins=20,labels=['x','y'],show_titles=False, plot_datapoints=False,
label_kwargs=dict(fontsize=15),levels=[0.68,0.95],dpi=200,
color=color_map[1],fill_contours=True,range=[[-6,6],[-6,6]])
fig.axes[2].plot(data[:,0],data[:,1],'.',c=color_map[0],alpha=0.1)
post_line = mlines.Line2D([], [], color=color_map[0], label='True Posterior')
data_line = mlines.Line2D([], [], color=color_map[1], label='Inferred Posterior')
plt.legend(handles=[post_line,data_line], bbox_to_anchor=(0.05, 1.0, 1., .0), loc=4,fontsize=12)
plt.show()
cal_fig = bnn_toy.plot_calibration(n_perc_points=30,title='',
legend=['Perfect Calibration','Inferred Posterior Calibration'])
return (fig,cal_fig)
# We start with our offset posterior
data_mean = np.zeros(2)
data_cov = np.eye(2)
toy_batch_size = 10000
n_draws = 1000
post_mean = np.ones(2)*2
post_cov=np.eye(2)
post_fig, cal_fig = plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws)
post_fig.savefig('../paper/figures/appendix/offset_corn.pdf')
cal_fig.savefig('../paper/figures/appendix/offset_cal.pdf')
data_mean = np.zeros(2)
data_cov = np.eye(2)
toy_batch_size = 10000
n_draws = 1000
post_mean = np.zeros(2)
post_cov=np.eye(2)*0.3
_ = plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws = 1000)
data_mean = np.zeros(2)
data_cov = np.eye(2)
toy_batch_size = 10000
n_draws = 1000
post_mean = np.zeros(2)
post_cov=np.eye(2)*3
post_fig, cal_fig = plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws)
post_fig.savefig('../paper/figures/appendix/underconf_corn.pdf')
cal_fig.savefig('../paper/figures/appendix/underconf_cal.pdf')
# We start with our offset posterior
data_mean = np.zeros(2)
data_cov = np.array([[1,0.99],[0.99,1]])
toy_batch_size = 10000
n_draws = 1000
post_mean = np.zeros(2)
post_cov=np.diag(np.std(np.random.multivariate_normal(data_mean,data_cov,(toy_batch_size)),axis=0))
_ = plot_toy_model_calibration(data_mean,data_cov,post_mean,post_cov,toy_batch_size,n_draws)
# First we'll make a class to generate our comparison
def plot_toy_model_calibration_gm(data_means,data_covs,post_mean,post_cov,toy_batch_size,ps,n_draws,
fit_guass_data=False):
bnn_toy = bnn_inference.InferenceClass(cfg)
# We generate our toy data
data = []
for dmi in range(len(data_means)):
data.append(np.random.multivariate_normal(data_means[dmi],data_covs[dmi],(int(toy_batch_size*ps[dmi]))))
data = np.concatenate(data,axis=0)
if fit_guass_data == True:
post_mean = np.mean(data,axis=0)
post_cov=np.diag(np.std(data,axis=0))
# Now we generate our posterior means and covariances
post_samples = np.random.multivariate_normal(post_mean,post_cov,(n_draws,toy_batch_size))
# We change our bnn inference instance to have these values
bnn_toy.samples_init = True
bnn_toy.y_pred = np.mean(post_samples,axis=0)
bnn_toy.predict_samps = post_samples
bnn_toy.y_test = data
# We can visualize the true data and the posterior, and compare that to the calibration plot.
color_map=["#377eb8", "#4daf4a"]
fig = corner.corner(post_samples.reshape((-1,2)),bins=20,labels=['x','y'],show_titles=False,
plot_datapoints=False,label_kwargs=dict(fontsize=15),levels=[0.68,0.95],dpi=1600,
color=color_map[1],fill_contours=True,range=[[-6,6],[-6,6]])
fig.axes[2].plot(data[:,0],data[:,1],'.',c=color_map[0],alpha=0.1)
post_line = mlines.Line2D([], [], color=color_map[0], label='True Posterior')
data_line = mlines.Line2D([], [], color=color_map[1], label='Inferred Posterior')
plt.legend(handles=[data_line,post_line], bbox_to_anchor=(0.05, 1.0, 1., .0), loc=4,fontsize=12.0)
plt.show()
cal_fig = bnn_toy.plot_calibration(n_perc_points=30,title='',
legend=['Perfect Calibration','Inferred Posterior Calibration'])
return (fig,cal_fig)
# Estimate a single Gaussian from the multimodal data.
data_means = [np.ones(2)*3,np.zeros(2)]
data_covs = [np.array([[0.4,0],[0,0.4]]),np.array([[0.4,0],[0,0.4]])]
ps = [0.9,0.1]
toy_batch_size = 10000
n_draws = 1000
data = []
for dmi in range(len(data_means)):
data.append(np.random.multivariate_normal(data_means[dmi],data_covs[dmi],(toy_batch_size//len(
data_mean))))
data = np.concatenate(data,axis=0)
post_mean = np.mean(data,axis=0)
post_cov=np.diag(np.std(data,axis=0))
post_fig, cal_fig = plot_toy_model_calibration_gm(data_means,data_covs,post_mean,post_cov,toy_batch_size,
ps,n_draws,fit_guass_data=True)
post_fig.savefig('../paper/figures/appendix/biv_corn.pdf')
cal_fig.savefig('../paper/figures/appendix/biv_cal.pdf')
| 0.644673 | 0.864939 |
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
import pandas as pd
import numpy as np
import math
import feather
from sklearn import preprocessing
from sklearn.metrics.pairwise import cosine_similarity
```
# Explore tags and book_tags
```
tags = pd.read_csv('./data/tags.csv')
books = feather.read_dataframe('./data/books_small')
book_tags = pd.read_csv('./data/book_tags.csv')
book_tags = book_tags[book_tags.goodreads_book_id.isin(books.goodreads_book_id)]
tags.head()
book_tags.head()
(tags.shape, book_tags.shape)
```
# Filter out tags
34k tags, or keywords, is probably more than we need, considering that some of them are meaningless (like --1- or --10-) and will likely have very low counts. Let's find out what count each tag has and which ones are the most popular.
```
# Remove 2 tag with negative count
counts = book_tags.groupby('tag_id').sum()[['count']].sort_values(by='count', ascending=False)[:-2]
counts['tag_name'] = tags.set_index('tag_id', drop=True).loc[counts.index]
counts.head(15)
```
Unfortunately, some of the most popular tags are not book attributes that we can utilize in our similarity function; for example, 'to-read' and 'favorites' are not inherent qualities of a book. For this problem, the only solution available right now is to clean them manually. I'll go through the top 300 tags and remove tags that are intuitively not innate characteristics of a book.
```
tags_to_skip = [30574, 8717, 11557, 5207, 22743, 17213, 4949, 18045, 11590,
22753, 9221, 30521, 20774, 15169, 25151, 32586, 20849, 25152, 25150, 25149,
24960, 11497, 2104, 22689, 25148, 11505, 1128, 9638, 9477, 240, 25153,
24997, 25147, 24999, 24996, 5051, 5481, 14064, 1078, 27535, 24995, 236,
25822, 31505, 29316, 25765, 25146, 9484, 11978, 11550, 20824, 30672, 22352,
26256, 783, 11491, 831, 20731, 251, 24993, 751, 7478, 21815, 4959, 25506,
25000, 4985, 30573, 22330, 17217, 5444, 32099, 14764, 18051, 722, 11503,
21274, 5180, 20781, 20811]
print("Tags skipped:", len(tags_to_skip))
counts.loc[tags_to_skip].head(10)
```
So, 80 out of the top 300 are tags that are either totally irrelevant or not relevant enough to be considered an attribute of a book. Somewhere between 50 and 300 keywords is a good number, so we're happy with 220.
```
filtered_tags = counts.loc[~counts.index.isin(tags_to_skip)][:220]
print("Tags kept:", len(filtered_tags))
filtered_tags.head(15)
```
Looks much more natural now. There is still room for improvement, such as removing duplicates, but we'll leave that for a later date. Now we will filter the book_tags using the tags we have chosen.
```
tags_kept = filtered_tags.index
filtered_book_tags = book_tags[book_tags.tag_id.isin(tags_kept)]
# Remove invalid entry (count = -1 and duplicated)
filtered_book_tags = filtered_book_tags[filtered_book_tags['count']>0].drop(265128, axis=0)
filtered_book_tags.shape
# The number of books that has tags
len(filtered_book_tags.goodreads_book_id.unique())
```
All books have some kind of tags, so we don't have to remove any.
```
filtered_book_tags['normalized_count'] = filtered_book_tags['count'].apply(lambda x: math.log(1 + x, 2))
filtered_book_tags.head(15)
```
The counts are then log-normalized so the numbers are on a more intuitive scale and the distribution is smoother.
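As a quick sanity check on this design choice (just an illustrative snippet), log2(1 + count) squashes counts spanning several orders of magnitude into a small, smooth range:

```
# Counts from 1 to 100,000 end up between 1 and ~17 after log2(1 + x).
import math
for c in [1, 10, 1000, 100000]:
    print(c, round(math.log(1 + c, 2), 2))
```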
```
filtered_book_tags['count'].hist()
filtered_book_tags.normalized_count.hist()
```
# Adding author as a tag
The other important field in a similarity function (or in content-based systems generally) is the author. Before we can use the author field, though, we need to clean it.
```
books.head(15).authors
```
Unfortunately, our author data is a single string that may contain multiple authors. So we must separate them, add them to the tag list, and build an index from author name to tag_id.
```
max_book_tags = filtered_book_tags.groupby('goodreads_book_id').max()
max_book_tags.head()
_, c, nc = max_book_tags.loc[1]
c, nc
# Get the author list for each book
authors = books[['goodreads_book_id', 'authors']]
authors.loc[:, 'authors_list'] = books.authors.str.split(',').apply(lambda x: list(map(lambda s: s.replace(" ", ""), x)))
authors.head()
# Make the author_tags dataframe and add the author tags to the book_tags with the normalized_count equal
# to the highest normalized_count of that specific book
author_index = {}
start_tag_id = len(tags)
author_tags = pd.DataFrame({'tag_id': pd.Series().astype(int), 'tag_name': []})
author_book_tags = pd.DataFrame({
'goodreads_book_id': pd.Series().astype(int),
'tag_id': pd.Series().astype(int),
'count': pd.Series().astype(int),
'normalized_count': pd.Series()
})
for index, entry in authors.iterrows():
for author in entry.authors_list:
if author not in author_index:
new_tag_id = start_tag_id + len(author_index)
author_index[author] = new_tag_id
author_tags.loc[len(author_index)] = [new_tag_id, author]
tag_id = author_index[author]
_, count, normalized_count = max_book_tags.loc[entry.goodreads_book_id]
author_book_tags.loc[len(author_book_tags)] = [int(entry.goodreads_book_id), int(tag_id), int(count), normalized_count]
author_tags.shape
author_tags.head(10)
author_book_tags.head()
final_book_tags = filtered_book_tags \
.append(author_book_tags, ignore_index=False) \
.astype({'goodreads_book_id': int, 'tag_id': int, 'count': int}) \
.reset_index(drop=True) \
.drop_duplicates()
final_book_tags.tail()
tag_matrix = final_book_tags.pivot(index='goodreads_book_id', columns='tag_id', values='normalized_count').fillna(0)
tag_matrix.shape
tag_matrix.head()
```
Now let's convert goodreads_book_id to our native book_id
```
book_id_idx = books[['goodreads_book_id', 'book_id']].set_index('goodreads_book_id')
tag_matrix['book_id'] = book_id_idx.loc[tag_matrix.index]
tag_matrix.set_index('book_id', inplace=True)
tag_matrix.head()
```
Let's test a few books to see if we did it correctly
```
final_tags = tags.append(author_tags).set_index('tag_id')
final_tags.tail()
# Pick a Harry Potter book
books[books.book_id==2].title
# Harry Potter and the Sorcerer's Stone
hp1 = tag_matrix.loc[2].sort_values(ascending=False)
hp1 = hp1[hp1>0]
pd.DataFrame({'tag': final_tags.loc[hp1.index].tag_name, 'strength': hp1}).head(15)
```
Looking good! Now we will normalize each book's row of the tag matrix into a unit vector so we can use cosine similarity as our similarity function
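The reason for L2-normalizing first (a small illustrative check, not part of the pipeline): once every row has unit length, cosine similarity is just a dot product, so the book profiles can be compared directly:

```
# For unit-length vectors, cosine similarity reduces to a plain dot product.
import numpy as np
a, b = np.array([1.0, 2.0, 3.0]), np.array([2.0, 0.0, 1.0])
cos = a @ b / (np.linalg.norm(a) * np.linalg.norm(b))
a_hat, b_hat = a / np.linalg.norm(a), b / np.linalg.norm(b)
print(np.isclose(cos, a_hat @ b_hat))  # True
```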
```
final_tag_matrix = pd.DataFrame(
data = preprocessing.normalize(tag_matrix.values, norm='l2'),
index = tag_matrix.index,
columns = tag_matrix.columns
)
final_tag_matrix.head()
# Harry Potter 1
hp1 = final_tag_matrix.loc[2].sort_values(ascending=False)
hp1 = hp1[hp1>0]
pd.DataFrame({'tag': final_tags.loc[hp1.index].tag_name, 'strength': hp1}).head(15)
```
We are set! Now it's time to create the similarity dataframe
```
cosine_sim = pd.DataFrame(
data = cosine_similarity(final_tag_matrix, final_tag_matrix),
index = final_tag_matrix.index,
columns = final_tag_matrix.index
)
cosine_sim.head()
```
Now we have our similarity function
```
book_title_idx = books.set_index('book_id')['title']
def check_similarity(book_id_1, book_id_2):
print("Book 1:", book_title_idx.loc[book_id_1])
print("Book 2:", book_title_idx.loc[book_id_2])
print("Similarity:", cosine_sim.loc[book_id_1][book_id_2])
check_similarity(27, 21)
```
We can check the top 10 books that are similar to a given book
```
def top_10(book_id):
print("Top 10 similar book to", book_title_idx.loc[book_id])
results = cosine_sim.loc[book_id].sort_values(ascending=False).index
print("Score\tTitle")
for i in range(0, 11):
id = results[i]
print("%.2f\t%s" % (cosine_sim.loc[book_id][id], book_title_idx.loc[id]))
# Books that are most similar to Harry Potter and the Half-Blood Prince
top_10(27)
```
The result is what we expected:
- The most similar book is of course itself, so we can always assume it will be at the top of the list
- Most of the books in the list come from the Harry Potter franchise, which makes sense as their contents are similar
Finally, we will save the tag_matrix (book profiles) to use them later on. Since feather doesn't allow a non-default index or non-string column names, we'll set the columns to tag names instead of tag ids and reset the index. The next time we use it, we can set the index back to book_id. The similarity dataframe can easily be recreated from the profiles, so we don't need to save it.
```
book_profiles = final_tag_matrix.copy()
book_profiles.columns = final_tags.loc[book_profiles.columns].values.flatten()
book_profiles.reset_index(inplace=True)
book_profiles.head()
book_profiles.to_feather('./data/book_profiles')
```
|
github_jupyter
|
%matplotlib inline
%load_ext autoreload
%autoreload 2
import pandas as pd
import numpy as np
import math
import feather
from sklearn import preprocessing
from sklearn.metrics.pairwise import cosine_similarity
tags = pd.read_csv('./data/tags.csv')
books = feather.read_dataframe('./data/books_small')
book_tags = pd.read_csv('./data/book_tags.csv')
book_tags = book_tags[book_tags.goodreads_book_id.isin(books.goodreads_book_id)]
tags.head()
book_tags.head()
(tags.shape, book_tags.shape)
# Remove 2 tag with negative count
counts = book_tags.groupby('tag_id').sum()[['count']].sort_values(by='count', ascending=False)[:-2]
counts['tag_name'] = tags.set_index('tag_id', drop=True).loc[counts.index]
counts.head(15)
tags_to_skip = [30574, 8717, 11557, 5207, 22743, 17213, 4949, 18045, 11590,
22753, 9221, 30521, 20774, 15169, 25151, 32586, 20849, 25152, 25150, 25149,
24960, 11497, 2104, 22689, 25148, 11505, 1128, 9638, 9477, 240, 25153,
24997, 25147, 24999, 24996, 5051, 5481, 14064, 1078, 27535, 24995, 236,
25822, 31505, 29316, 25765, 25146, 9484, 11978, 11550, 20824, 30672, 22352,
26256, 783, 11491, 831, 20731, 251, 24993, 751, 7478, 21815, 4959, 25506,
25000, 4985, 30573, 22330, 17217, 5444, 32099, 14764, 18051, 722, 11503,
21274, 5180, 20781, 20811]
print("Tags skipped:", len(tags_to_skip))
counts.loc[tags_to_skip].head(10)
filtered_tags = counts.loc[~counts.index.isin(tags_to_skip)][:220]
print("Tags kept:", len(filtered_tags))
filtered_tags.head(15)
tags_kept = filtered_tags.index
filtered_book_tags = book_tags[book_tags.tag_id.isin(tags_kept)]
# Remove invalid entry (count = -1 and duplicated)
filtered_book_tags = filtered_book_tags[filtered_book_tags['count']>0].drop(265128, axis=0)
filtered_book_tags.shape
# The number of books that has tags
len(filtered_book_tags.goodreads_book_id.unique())
filtered_book_tags['normalized_count'] = filtered_book_tags['count'].apply(lambda x: math.log(1 + x, 2))
filtered_book_tags.head(15)
filtered_book_tags['count'].hist()
filtered_book_tags.normalized_count.hist()
books.head(15).authors
max_book_tags = filtered_book_tags.groupby('goodreads_book_id').max()
max_book_tags.head()
_, c, nc = max_book_tags.loc[1]
c, nc
# Get the author list for each book
authors = books[['goodreads_book_id', 'authors']]
authors.loc[:, 'authors_list'] = books.authors.str.split(',').apply(lambda x: list(map(lambda s: s.replace(" ", ""), x)))
authors.head()
# Make the author_tags dataframe and add the author tags to the book_tags with the normalized_count equal
# to the highest normalized_count of that specific book
author_index = {}
start_tag_id = len(tags)
author_tags = pd.DataFrame({'tag_id': pd.Series().astype(int), 'tag_name': []})
author_book_tags = pd.DataFrame({
'goodreads_book_id': pd.Series().astype(int),
'tag_id': pd.Series().astype(int),
'count': pd.Series().astype(int),
'normalized_count': pd.Series()
})
for index, entry in authors.iterrows():
for author in entry.authors_list:
if author not in author_index:
new_tag_id = start_tag_id + len(author_index)
author_index[author] = new_tag_id
author_tags.loc[len(author_index)] = [new_tag_id, author]
tag_id = author_index[author]
_, count, normalized_count = max_book_tags.loc[entry.goodreads_book_id]
author_book_tags.loc[len(author_book_tags)] = [int(entry.goodreads_book_id), int(tag_id), int(count), normalized_count]
author_tags.shape
author_tags.head(10)
author_book_tags.head()
final_book_tags = filtered_book_tags \
.append(author_book_tags, ignore_index=False) \
.astype({'goodreads_book_id': int, 'tag_id': int, 'count': int}) \
.reset_index(drop=True) \
.drop_duplicates()
final_book_tags.tail()
tag_matrix = final_book_tags.pivot(index='goodreads_book_id', columns='tag_id', values='normalized_count').fillna(0)
tag_matrix.shape
tag_matrix.head()
book_id_idx = books[['goodreads_book_id', 'book_id']].set_index('goodreads_book_id')
tag_matrix['book_id'] = book_id_idx.loc[tag_matrix.index]
tag_matrix.set_index('book_id', inplace=True)
tag_matrix.head()
final_tags = tags.append(author_tags).set_index('tag_id')
final_tags.tail()
# Pick a Harry Potter book
books[books.book_id==2].title
# Harry Potter and the Sorcerer's Stone
hp1 = tag_matrix.loc[2].sort_values(ascending=False)
hp1 = hp1[hp1>0]
pd.DataFrame({'tag': final_tags.loc[hp1.index].tag_name, 'strength': hp1}).head(15)
final_tag_matrix = pd.DataFrame(
data = preprocessing.normalize(tag_matrix.values, norm='l2'),
index = tag_matrix.index,
columns = tag_matrix.columns
)
final_tag_matrix.head()
# Harry Potter 1
hp1 = final_tag_matrix.loc[2].sort_values(ascending=False)
hp1 = hp1[hp1>0]
pd.DataFrame({'tag': final_tags.loc[hp1.index].tag_name, 'strength': hp1}).head(15)
cosine_sim = pd.DataFrame(
data = cosine_similarity(final_tag_matrix, final_tag_matrix),
index = final_tag_matrix.index,
columns = final_tag_matrix.index
)
cosine_sim.head()
book_title_idx = books.set_index('book_id')['title']
def check_similarity(book_id_1, book_id_2):
print("Book 1:", book_title_idx.loc[book_id_1])
print("Book 2:", book_title_idx.loc[book_id_2])
print("Similarity:", cosine_sim.loc[book_id_1][book_id_2])
check_similarity(27, 21)
def top_10(book_id):
print("Top 10 similar book to", book_title_idx.loc[book_id])
results = cosine_sim.loc[book_id].sort_values(ascending=False).index
print("Score\tTitle")
for i in range(0, 11):
id = results[i]
print("%.2f\t%s" % (cosine_sim.loc[book_id][id], book_title_idx.loc[id]))
# Books that are most similar to Harry Potter and the Half-Blood Prince
top_10(27)
book_profiles = final_tag_matrix.copy()
book_profiles.columns = final_tags.loc[book_profiles.columns].values.flatten()
book_profiles.reset_index(inplace=True)
book_profiles.head()
book_profiles.to_feather('./data/book_profiles')
| 0.314051 | 0.810704 |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Datasets/Vectors/global_land_ice_measurements.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Vectors/global_land_ice_measurements.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Datasets/Vectors/global_land_ice_measurements.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Vectors/global_land_ice_measurements.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
```
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as emap
except:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
```
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
```
## Add Earth Engine Python script
```
# Add Earth Engine dataset
dataset = ee.FeatureCollection('GLIMS/current')
visParams = {
'palette': ['gray', 'cyan', 'blue'],
'min': 0.0,
'max': 10.0,
'opacity': 0.8,
}
image = ee.Image().float().paint(dataset, 'area')
Map.setCenter(-35.618, 66.743, 7)
Map.addLayer(image, visParams, 'GLIMS/current')
# Map.addLayer(dataset, {}, 'for Inspector', False)
```
## Display Earth Engine data layers
```
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
```
```
import pandas as pd
df = pd.read_csv('./generic-food.csv')
df.head()
list(df['GROUP'].unique())
list(df['SCIENTIFIC NAME'].unique())
list(df['SUB GROUP'].unique())
df[df['GROUP'] == 'Fats and oils']
first = ['Cereals and cereal products', 'Confectioneries', 'Baking goods', 'Snack foods']
second = ['Teas', 'Coffee and coffee products', 'Beverages', 'Aquatic foods', 'Baby foods']
third = ['Milk and milk products',]
fourth = ['Fats and oils', 'Animal foods',]
fivth = ['Herbs and Spices', 'Herbs and spices', 'Vegetables', 'Fruits', 'Nuts', 'Pulses', 'Soy', ]
dff = []
for group in first:
dff.append(df[df['GROUP'] == group])
df1 = pd.concat(dff)
print(list(df1['GROUP'].unique()) == first)
df1.head()
dff = []
for group in second:
dff.append(df[df['GROUP'] == group])
df2 = pd.concat(dff)
print(list(df2['GROUP'].unique()) == second)
df2.head()
dff = []
for group in third:
dff.append(df[df['GROUP'] == group])
df3 = pd.concat(dff)
print(list(df3['GROUP'].unique()) == third)
df3.head()
dff = []
for group in fourth:
dff.append(df[df['GROUP'] == group])
df4 = pd.concat(dff)
print(list(df4['GROUP'].unique()) == fourth)
df4.head()
dff = []
for group in fivth:
dff.append(df[df['GROUP'] == group])
df5 = pd.concat(dff)
print(list(df5['GROUP'].unique()) == fivth)
df5.head()
```
## Write each dataframe to a text file
```
# Write the unique food names, scientific names, groups, and sub groups of each
# sub-dataframe to its own text file (df1.txt ... df5.txt), one lowercased entry per line.
for idx, frame in enumerate([df1, df2, df3, df4, df5], start=1):
    with open(f'df{idx}.txt', 'w') as f:
        for column in ['FOOD NAME', 'SCIENTIFIC NAME', 'GROUP', 'SUB GROUP']:
            for name in list(frame[column].unique()):
                f.write(str(name).lower() + '\n')
```
```
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import seaborn as sns
import sys
import os
import json
import itertools
import umap
from colour import Color
from collections import Counter
sys.path.insert(1, "../")
sys.path.insert(1, "../utilities")
from group_analysis import do_kmeans_clustering, plot_clusters, log_and_scale, colour_list, display_group_kw_combis
from helpers import load_posts, load_toks, load_pos, get_top_n_toks
from clustering import reduce_features, make_elbow_plot, compare_binary_normed_feature_embeddings, mc_list, plot_bin_and_norm_clusters, plot_contingency_matrix, calculate_cluster_lrs
from language_change_methods.features import get_tok_counts, function_words, combine_counts, make_feature_matrix
from language_change_methods.utility_functions import basic_preprocessing, get_ngram_example, get_text_example_of_pos, make_ngram_concordance, make_pos_concordance
from sklearn.cluster import KMeans, SpectralClustering, MeanShift, estimate_bandwidth, AgglomerativeClustering
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn import metrics
from nltk import ngrams as make_ngrams
from settings import TFES_FP as DB_FP, TFES_TOK_FP
GRAPH_DIR = "./Graphs"
%load_ext autoreload
%autoreload 2
%%time
all_posts = load_posts(DB_FP)
from helpers import flat_earth_boards, off_topic_boards as other_boards
fe_posts = all_posts.query("board_id in @flat_earth_boards")
ot_posts = all_posts.query("board_id in @other_boards")
toks = {int(x[0]): x[1] for x in load_toks(TFES_TOK_FP)}
toks = pd.Series(toks)
toks = toks[toks.index.isin(fe_posts.index)]
pos = {int(x[0]): x[1] for x in load_pos(TFES_TOK_FP)}
pos = pd.Series(pos)
pos = pos[pos.index.isin(fe_posts.index)]
pos = pos.loc[toks.index]
fe_posts = fe_posts.loc[toks.index]
with open("../data/user_metagroup.json") as group_file:
meta_clusts = pd.Series(json.load(group_file))
meta_clusts.index = meta_clusts.index.astype("float64")
post_lengths = toks.apply(len)
user_n_words = post_lengths.groupby(fe_posts["poster_id"]).sum()
posts_per_user = fe_posts.groupby("poster_id").size()
over_100 = posts_per_user[posts_per_user > 100]
print(f"{len(over_100)} with > 100 posts")
top_20 = posts_per_user.sort_values(ascending=False).head(20)
print(f"{len(top_20)} in the top 20")
curr_users = meta_clusts[meta_clusts == "Core Community"].index
user_n_words = user_n_words.loc[curr_users]
posts_per_user = posts_per_user.loc[curr_users]
curr_posts = fe_posts.query("poster_id in @curr_users")
# Get colour gradient
colour_gradient = list(Color("red").range_to(Color("blue"),len(curr_users)))
colour_gradient = pd.Series(colour_gradient, index=posts_per_user.loc[curr_users].sort_values(ascending=True).index)
colour_gradient = colour_gradient.loc[curr_users].apply(str)
```
# Create the user text chunks
```
merge_lists = lambda x: list(itertools.chain.from_iterable(x))
def get_chunks(u, tokens, chunk_size):
for i in range(0, len(tokens)-chunk_size, chunk_size):
yield u, tokens[i:i+chunk_size]
def make_tok_chonks(tokens, chunk_size):
user_chunks = [[[user, chunk] for user, chunk in get_chunks(user, curr_toks, chunk_size)] for user, curr_toks in tokens.items()]
user_chunks = merge_lists(user_chunks)
user_chunks = pd.DataFrame(user_chunks, columns=["User", "Chunk"])
return user_chunks
CHUNK_SIZE = 1000
%%time
user_toks = toks.groupby(curr_posts["poster_id"]).apply(merge_lists)
user_chunks = make_tok_chonks(user_toks, CHUNK_SIZE)
%%time
user_pos = pos.groupby(curr_posts["poster_id"]).apply(merge_lists)
user_pos_chunks = make_tok_chonks(user_pos, CHUNK_SIZE)
```
# Create the Different Feature Counts
#### BOW
```
%%time
# Get the counts
bow_counts = {i: get_tok_counts(post) for i, post in user_toks.items()}
bow_counts = {i: {tok: count for tok, count in post.items() if tok not in function_words} for i, post in bow_counts.items()}
top_n = get_top_n_toks(bow_counts.values(), 10000)
bow_counts = {i: {tok: count for tok, count in post.items() if tok in top_n} for i, post in bow_counts.items()}
bow_counts = pd.Series(bow_counts)
```
#### Tfidf bow counts
```
%%time
# Credit for this cell: http://www.davidsbatista.net/blog/2018/02/28/TfidfVectorizer/
from sklearn.feature_extraction.text import TfidfVectorizer
# If you had counts instead of tokens, you'd use "TfidfTransformer"
def dummy_fun(doc):
return doc
tfidf = TfidfVectorizer(
analyzer='word',
tokenizer=dummy_fun,
preprocessor=dummy_fun,
token_pattern=None)
tfidf_matrix = tfidf.fit_transform(user_toks)
tfidf_feats = tfidf_matrix[:, np.isin(tfidf.get_feature_names(), top_n)].todense()
```
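The comment above mentions `TfidfTransformer` as the counterpart for the case where you already have raw counts rather than token lists. A minimal, self-contained sketch of that alternative on toy documents (the toy data below is made up for illustration, not taken from the forum corpus):
```
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

# Toy pre-tokenised documents standing in for real data
toy_docs = [["flat", "earth", "map"], ["round", "earth", "earth"]]

# Build a raw term-count matrix first (the lambda analyzer skips tokenisation) ...
toy_counts = CountVectorizer(analyzer=lambda doc: doc).fit_transform(toy_docs)

# ... then re-weight the counts with idf
toy_tfidf = TfidfTransformer().fit_transform(toy_counts)
```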
#### PoS Trigrams
```
pos_trigrams = pd.Series([make_ngrams(post, 3) for post in user_pos], index=user_pos.index)
pos_trigrams = pos_trigrams.apply(lambda post: ["_".join(words) for words in post])
top_n = [x[0] for x in Counter(merge_lists(pos_trigrams)).most_common(1000)]
pos_tri_counts = pos_trigrams.apply(Counter).apply(lambda dic: {w: c for w, c in dic.items() if w in top_n})
pos_tri_counts = pd.Series(pos_tri_counts)
# pos_tri_chunks = pd.DataFrame(index=user_pos_chunks.index)
# pos_tri_chunks["User"] = user_pos_chunks["User"]
# pos_tri_chunks["Chunk"] = pos_trigrams
```
# Create the Feature Matrix
```
%%time
# Initialise feats to None
bin_feats = None
norm_feats = None
# Make the feature matrix
curr_feat_type = "pos_tri"
SCALE = True
APPLY_PCA = True
if curr_feat_type == "pos_tri":
curr_counts = pos_tri_counts
curr_toks = pos_trigrams
elif curr_feat_type == "bow":
curr_counts = bow_counts
curr_toks = user_toks
elif curr_feat_type == "tfidf":
curr_counts = bow_counts
curr_toks = user_toks
bin_feats = np.vectorize(lambda x: 1 if x > 0 else 0)(tfidf_feats)
norm_feats = tfidf_feats
if bin_feats is None:
curr_feats, curr_feat_names = make_feature_matrix(curr_counts.to_dict(), False)
bin_feats = np.vectorize(lambda x: 1 if x > 0 else 0)(curr_feats)
norm_feats = curr_feats / user_toks.apply(len).values[:,None]
```
#### Scaling
```
if SCALE:
norm_feats = StandardScaler().fit_transform(norm_feats)
```
#### PCA reduction
```
%%time
from sklearn.decomposition import PCA
pca = PCA(.95)
if APPLY_PCA:
norm_feats = pca.fit_transform(norm_feats)
print(f"{norm_feats.shape[1]} components")
```
#### Make 2d embeddings
```
%%time
norm_embedding = reduce_features(norm_feats)
fig, ax = plt.subplots(figsize=(8,8))
ax.scatter(norm_feats[:,0], norm_feats[:,1])
plt.show()
```
# Do the Clustering
```
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
from scipy.spatial.distance import cosine as cosine_dist
from sklearn.cluster import AgglomerativeClustering
def plot_hc(feats, labels, out_fp=None):
# Perform the Hierarchical Clustering
d_list = linkage(feats, 'average', metric="cosine")
# Calculate the Cophenetic Correlation Coefficient
c, coph_dists = cophenet(d_list, pdist(feats))
print("Cophenetic Correlation Coefficient: {}".format(c))
# Do the actual plotting.
fig, ax = plt.subplots(figsize=(12,12))
dendrogram(
d_list,
leaf_rotation=0, # rotates the x axis labels
leaf_font_size=14, # font size for the x axis labels
labels = labels,
ax=ax,
color_threshold=0
)
plt.setp(ax.get_xticklabels(), fontsize=14)
plt.setp(ax.get_yticklabels(), fontsize=14)
ax.set_xlabel("Users", fontsize=14)
ax.set_ylabel("Cosine Distance", fontsize=14)
ax.grid()
plt.tight_layout()
if out_fp is not None:
fig.savefig(out_fp)
plot_hc(norm_feats, user_toks.index.astype(int), out_fp=None)#os.path.join(GRAPH_DIR, f"dendrogram_{curr_feat_type}.pdf"))
%%time
for i in range(2,10):
model = AgglomerativeClustering(n_clusters=i, affinity='cosine', linkage='average')
clusters = model.fit_predict(norm_feats)
avg_silhouette_score = metrics.silhouette_score(norm_feats, clusters, metric="cosine")
print(f"Average silhoette score for {i} clusters:\t", avg_silhouette_score, "\n")
%%time
sil_scores = []
for i in range(2,len(user_toks)):
model = AgglomerativeClustering(n_clusters=i, affinity='cosine', linkage='average')
clusters = model.fit_predict(norm_feats)
sil_scores.append(metrics.silhouette_score(norm_feats, clusters, metric="cosine"))
fig, ax = plt.subplots(figsize=(8,4))
ax.plot(list(range(2, len(user_toks))), sil_scores)
plt.show()
model = AgglomerativeClustering(n_clusters=2, affinity='cosine', linkage='average')
clusters1 = model.fit_predict(norm_feats)
for clust in set(clusters1):
print(f"Num users in Cluster {clust}: {len(user_toks[clusters1==clust].index)}")
for clust in set(clusters1):
print(f"Num words in Cluster {clust}: {len(merge_lists(user_toks[clusters1==clust]))}")
for clust in set(clusters1):
print(f"Avg words in Cluster {clust}: {user_toks[clusters1==clust].apply(len).mean():.0f}")
print(f"Median words in Cluster {clust}: {user_toks[clusters1==clust].apply(len).median():.0f}")
print()
model = AgglomerativeClustering(n_clusters=None, distance_threshold=1, affinity='cosine', linkage='average')
clusters2 = model.fit_predict(norm_feats)
for clust in set(clusters2):
print(f"Num users in Cluster {clust}: {len(user_toks[clusters2==clust].index)}")
for clust in set(clusters2):
print(f"Num words in Cluster {clust}: {len(merge_lists(user_toks[clusters2==clust]))}")
for clust in set(clusters2):
print(f"Avg words in Cluster {clust}: {user_toks[clusters2==clust].apply(len).mean():.0f}")
print(f"Median words in Cluster {clust}: {user_toks[clusters2==clust].apply(len).median():.0f}")
print()
clusters = clusters1
avg_silhouette_score = metrics.silhouette_score(norm_feats, clusters, metric="cosine")
print("Average silhoette score for Normed:\t", avg_silhouette_score, "\n")
for clust in set(clusters):
print(clust, len(user_toks[clusters==clust].index))
print(",".join(map(str, user_toks[user_toks.index.isin(top_20.index)].index)))
print(",".join(map(str, clusters[user_toks.index.isin(top_20.index)])))
for i, (c1, c2) in enumerate(zip(clusters[user_toks.index.isin(top_20.index)], clusters2[user_toks.index.isin(top_20.index)])):
print(i, c1, c2, sep=",")
ling_clusts = {"Cluster 0": list(user_toks[clusters1==0].index),
"Cluster 1": list(user_toks[clusters1==1].index)}
with open(os.path.join(GRAPH_DIR, f"{curr_feat_type}_core_clusts.json"), "w") as out_file:
json.dump(ling_clusts, out_file)
fig, ax = plt.subplots(figsize=(8,8))
for c in set(clusters):
ax.scatter(norm_feats[clusters==c,0], norm_feats[clusters==c,1], label=c, alpha=0.3)
ax.grid()
ax.legend()
plt.show()
```
# Analyse Spread Across Users
```
user_toks.index[clusters==0]
user_toks.index[clusters==1]
contingency1 = pd.crosstab(user_toks.index.values, [clusters1], rownames=["User"], colnames=["Cluster"])
contingency2 = pd.crosstab(user_toks.index.values, [clusters2], rownames=["User"], colnames=["Cluster"])
fig, ax = plt.subplots(figsize=(8,6))
sns.heatmap(contingency1.loc[top_20.index], annot=True, ax=ax, fmt='g', cmap="Greens")
# fig.savefig(os.path.join(GRAPH_DIR, f"chunk_contingency_2_{curr_feat_type}.pdf"))
plt.show()
fig, ax = plt.subplots(figsize=(4,20))
sns.heatmap(contingency1.loc[posts_per_user.sort_values(ascending=False).head(100).index], annot=True, ax=ax, fmt='g', cmap="Greens")
# fig.savefig(os.path.join(GRAPH_DIR, f"chunk_contingency_2_{curr_feat_type}.pdf"))
plt.show()
fig, ax = plt.subplots(figsize=(8,6))
sns.heatmap(contingency2.loc[top_20.index], annot=True, ax=ax, fmt='g', cmap="Greens")
# fig.savefig(os.path.join(GRAPH_DIR, f"chunk_contingency_2_{curr_feat_type}.pdf"))
plt.show()
fig, ax = plt.subplots(figsize=(4,50))
sns.heatmap(contingency2.loc[posts_per_user.sort_values(ascending=False).index], annot=True, ax=ax, fmt='g', cmap="Greens")
# fig.savefig(os.path.join(GRAPH_DIR, f"chunk_contingency_2_{curr_feat_type}.pdf"))
plt.show()
```
# Compare clusters
```
clust_comparison = pd.crosstab(clusters1, [clusters2], rownames=["Clust 1"], colnames=["Clust 2"])
clust_comparison
fig, ax = plt.subplots(figsize=(6,4))
sns.heatmap(clust_comparison, annot=True, ax=ax, fmt='g', cmap="Greens")
plt.show()
```
# Get Some Keywords Between Clusters
```
%%time
lr = lambda x, y: np.log2(x / y)
def get_cluster_keywords(tokens, clusters):
clust_counts = tokens.groupby(clusters).apply(
lambda x: [Counter(merge_lists(x))]).apply(
lambda x: x[0])
clust_n_words = {clust: tokens[clusters==clust].apply(len).sum() for clust in set(clusters)}
for clust, counts in clust_counts.items():
other_counts = Counter(merge_lists(tokens[clusters!=clust]))
other_nwords = tokens[clusters!=clust].apply(len).sum()
lrs = calculate_cluster_lrs(counts, other_counts, clust_n_words[clust], other_nwords)
lrs = {w: l for w, l in lrs.items() if counts[w] > 100}
lrs = pd.Series(lrs).sort_values(ascending=False)
t10_lr = lrs.head(20)
b10_lr = lrs.tail(20)
print(clust)
print("-------------------------------------------------------------------------------------")
for (w1, lr1), (w2, lr2) in zip(t10_lr.items(), b10_lr.items()):
print(f"{w1:20}\t{lr1:10.2f}\t|\t{w2:20}\t{lr2:10.2f}")
print("-------------------------------------------------------------------------------------")
def get_cluster_lrs(tokens, clusters, min_freq=100):
clust_counts = tokens.groupby(clusters).apply(
lambda x: [Counter(merge_lists(x))]).apply(
lambda x: x[0])
clust_n_words = {clust: tokens[clusters==clust].apply(len).sum() for clust in set(clusters)}
clust_kw = dict()
for clust, counts in clust_counts.items():
other_counts = Counter(merge_lists(tokens[clusters!=clust]))
other_nwords = tokens[clusters!=clust].apply(len).sum()
lrs = calculate_cluster_lrs(counts, other_counts, clust_n_words[clust], other_nwords)
lrs = {w: l for w, l in lrs.items() if counts[w] > min_freq}
lrs = pd.Series(lrs).sort_values(ascending=False)
clust_kw[clust] = lrs
return clust_kw
```
# Key features (2 clusters)
```
clusters = clusters1
```
## Key PoS-Trigrams
```
%%time
get_cluster_keywords(pos_trigrams, clusters)
get_text_example_of_pos(["PUNCT", "INTJ", "INTJ"],
user_pos[clusters==0],
user_toks[clusters==0],
doc_labels=user_toks[clusters==0].index, n_examples=25)
%%time
concs = make_pos_concordance(["NUM", "PROPN", "NUM"],
user_pos[clusters==1],
user_toks[clusters==1],
doc_labels=user_toks[clusters==1].index)
concs = pd.DataFrame(concs, columns=["user", "conc"])
print("Distribution across users:")
num_uses_per_user = concs.groupby("user").size()
print("Num uses per user", num_uses_per_user, sep="\n", end="\n\n")
print("Mean uses per user:\t", num_uses_per_user.mean())
print("Median uses per user:\t", num_uses_per_user.median())
```
## Key words
```
%%time
get_cluster_keywords(user_toks, clusters)
%%time
clust_lrs = get_cluster_lrs(user_toks, clusters)
(clust_lrs[0]["re'ers"])
(clust_lrs[0]["reers"])
clust_lrs[1]["fe'ers"]
clust_lrs[0]["feers"]
clust_lrs[0]["flat"]
clust_lrs[0]["round"]
c1 = len(make_ngram_concordance(["flat", "earthers"],
user_toks[clusters==0],
doc_labels=user_toks[clusters==0].index))
c2 = len(make_ngram_concordance(["flat", "earthers"],
user_toks[clusters==1],
doc_labels=user_toks[clusters==1].index))
n1 = user_toks[clusters==0].apply(len).sum()
n2 = user_toks[clusters==1].apply(len).sum()
print(f"Cluster 0: {c1} / {n1}")
print(f"Cluster 1: {c2} / {n2}")
lr((c1+0.5)/n1, (c2+0.5)/n2)
c1 = len(make_ngram_concordance(["round", "earthers"],
user_toks[clusters==0],
doc_labels=user_toks[clusters==0].index))
c2 = len(make_ngram_concordance(["round", "earthers"],
user_toks[clusters==1],
doc_labels=user_toks[clusters==1].index))
n1 = user_toks[clusters==0].apply(len).sum()
n2 = user_toks[clusters==1].apply(len).sum()
print(f"Cluster 0: {c1} / {n1}")
print(f"Cluster 1: {c2} / {n2}")
lr((c1+0.5)/n1, (c2+0.5)/n2)
get_ngram_example(["fe'ers"],
user_toks[clusters==0],
doc_labels=user_toks[clusters==0].index, n_examples=25)
get_ngram_example(["flat", "earthers"],
user_toks[clusters==0],
doc_labels=user_toks[clusters==0].index, n_examples=25)
```
# Key features (cosine dist < 1 clusters)
```
clusters = clusters2
%%time
get_cluster_keywords(pos_trigrams, clusters)
%%time
get_cluster_keywords(user_toks, clusters)
```
**Warm-up description:**
1. [X] Create the Televisao class with the attributes ligada (initialized to False) and canal (initialized to 2).
2. [X] Add the attributes tamanho and marca to the Televisao class. Create two Televisao objects and assign them different sizes and brands. Then print the value of these attributes to confirm that each instance (object) keeps its own independent values.
3. [X] Add two new methods, muda_canal_para_cima and muda_canal_para_baixo. Currently, the Televisao class initializes the channel to 2. Modify the Televisao class so that it receives the initial channel in its constructor.
4. [X] Add two more attributes, canal_minimo (default value 1) and canal_maximo (default value 99), and modify the Televisao class so that changing the channel down past the minimum wraps around to the maximum channel, and changing it up past the maximum wraps back to the minimum channel.
5. [X] Modify the Televisao constructor so that canal_minimo and canal_maximo are optional parameters defaulting to 2 and 14, respectively.
6. [X] Create two Televisao instances, specifying the values of canal_minimo and canal_maximo by name.
```
class Televisao:
def __init__(self, marca: str, tamanho: int, canal: int, canal_maximo: int = 14, canal_minimo: int = 2):
self.__ligada = False
self.__marca = marca
self.__tamanho = tamanho
self.__canal_maximo = canal_maximo
self.__canal_minimo = self.__VerifyCanalMinimo(canal_minimo)
self.__canal = self.__CheckValorCanal(canal)
@property
def ligada(self):
return self.__ligada
@property
def canal(self):
return self.__canal
@property
def marca(self):
return self.__marca
@property
def tamanho(self):
return self.__tamanho
@property
def canal_maximo(self):
return self.__canal_maximo
@property
def canal_minimo(self):
return self.__canal_minimo
@canal.setter
def canal(self, canal: int):
self.__canal = self.__CheckValorCanal(canal)
@ligada.setter
def ligada(self, ligada: bool):
self.__ligada = ligada
def __CheckValorCanal(self, canal: int):
if canal < self.canal_minimo:
return self.canal_maximo
elif canal > self.canal_maximo:
return self.canal_minimo
else:
return canal
def __VerifyCanalMinimo(self, canal_minimo: int):
if canal_minimo > self.canal_maximo:
print('Erro: Canal minimo maior que canal maximo indicado!')
return 1
else:
return canal_minimo
def MudarCanalParaBaixo(self):
self.canal = self.canal - 1
def MudarCanalParaCima(self):
self.canal = self.canal + 1
print('>> Instancias de Televisão')
televisaoDaJulia = Televisao('Samsung', 55, 1, canal_minimo = 3, canal_maximo = 6)
televisaoDaLeticia = Televisao('LG', 65, 99, canal_maximo = 4, canal_minimo = 5)
print('[Televisao da Julia] marca:', televisaoDaJulia.marca, ' tamanho: ', televisaoDaJulia.tamanho, ' canal: ', televisaoDaJulia.canal)
print('[Televisao da Julia] canal_minimo: ', televisaoDaJulia.canal_minimo, ' canal_maximo: ', televisaoDaJulia.canal_maximo)
print('[Televisao da Leticia] marca:', televisaoDaLeticia.marca, ' tamanho: ', televisaoDaLeticia.tamanho, ' canal: ', televisaoDaLeticia.canal)
print('[Televisao da Leticia] canal_minimo:', televisaoDaLeticia.canal_minimo, ' canal_maximo: ', televisaoDaLeticia.canal_maximo)
televisaoDaJulia.MudarCanalParaBaixo()
televisaoDaLeticia.MudarCanalParaCima()
print('## Mudança de canal ##')
print('[Televisao da Julia] marca:', televisaoDaJulia.marca, ' tamanho: ', televisaoDaJulia.tamanho, ' canal: ', televisaoDaJulia.canal)
print('[Televisao da Leticia] marca:', televisaoDaLeticia.marca, ' tamanho: ', televisaoDaLeticia.tamanho, ' canal: ', televisaoDaLeticia.canal)
print('')
```
7. [X] Create classes to represent states and cities. Each state has a name, an abbreviation (sigla), and cities. Each city has a name and a population. Write a test program that creates three states with a few cities in each. Display each state's population as the sum of the populations of its cities.
```
class Cidade:
def __init__(self, nome: str, populacao: int):
self.__nome = nome
self.__populacao = populacao
@property
def nome(self):
return self.__nome
@property
def populacao(self):
return self.__populacao
class Estado:
def __init__(self, nome: str, sigla: str, cidades):
self.__nome = nome
self.__sigla = sigla
self.__cidades = cidades
self.__populacao_total = self.__CountPopulacao()
@property
def nome(self):
return self.__nome
@property
def sigla(self):
return self.__sigla
@property
def cidades(self):
return self.__cidades
@property
def populacao_total(self):
return self.__populacao_total
@populacao_total.setter
def populacao_total(self, populacao_total: int):
        self.__populacao_total = populacao_total
def __CountPopulacao(self):
pop_total = 0
for cidade in self.cidades:
pop_total += cidade.populacao
return pop_total
print('>> Instancias de Cidade e Estado')
uruguaiana = Cidade('Uruguaiana', 200)
portoAlegre = Cidade('Porto Alegre', 5000)
estado = Estado('Rio Grande do Sul', 'RS', [uruguaiana, portoAlegre])
print('[', uruguaiana.nome, ']: populacao de ', uruguaiana.populacao)
print('[', portoAlegre.nome, ']: populacao de ', portoAlegre.populacao)
print('[', estado.nome, ']: populacao total de ', estado.populacao_total)
print('')
```
8. [X] Write a Coordenada class with attributes x and y, and methods to display the coordinates, compute the distance to another coordinate, compare coordinates, and display them in polar-coordinate format.
```
import math
class Coordenada:
def __init__(self, x: float, y: float):
self.__x = x
self.__y = y
@property
def x(self):
return self.__x
@property
def y(self):
return self.__y
def CoordenadaFormatada(self):
return f'({self.x}, {self.y})'
def CalcDistancia(self, outra_coordenada):
__ponto_x = math.pow((outra_coordenada.x - self.x), 2)
__ponto_y = math.pow((outra_coordenada.y - self.y), 2)
return math.sqrt(__ponto_x + __ponto_y)
def CompCoordenadas(self, outraCoordenada):
coordenada_inicial = Coordenada(0, 0)
comp_coordenada = self.CalcDistancia(coordenada_inicial)
comp_outra_coordenada = outraCoordenada.CalcDistancia(coordenada_inicial)
if comp_coordenada > comp_outra_coordenada:
return f'A coordenada ({self.x}, {self.y}) esta mais distante da origem que a coordenada ({outraCoordenada.x}, {outraCoordenada.y})'
elif comp_coordenada < comp_outra_coordenada:
return f'A coordenada ({outraCoordenada.x}, {outraCoordenada.y}) esta mais distante da origem que a coordenada ({self.x}, {self.y})'
else:
return f'Ambas coordenadas [({outraCoordenada.x}, {outraCoordenada.y}), ({self.x}, {self.y})] tem a mesma distancia da origem'
    def FormatarEmCoordenadasPolar(self):
        raio = math.sqrt(math.pow(self.x, 2) + math.pow(self.y, 2))
        # atan2 gives the polar angle measured from the x axis and avoids division by zero
        angulo = math.atan2(self.y, self.x)
        return f'P({raio}, {angulo})'
print('>> Instancias de Coordenadas')
coordenadaA = Coordenada(2, 2)
coordenadaB = Coordenada(3, 3)
print('[Coordenada A]: ', coordenadaA.CoordenadaFormatada())
print('[Coordenada B]: ', coordenadaB.CoordenadaFormatada())
print('[Distancia coordA -> coordB]: ', coordenadaA.CalcDistancia(coordenadaB))
print('[Distancia coordB -> coordA]: ', coordenadaB.CalcDistancia(coordenadaA))
print('[Comparacao da distancia das coordenadas até a origem]: ', coordenadaA.CompCoordenadas(coordenadaB))
print('[coordA -> Polar]', coordenadaA.FormatarEmCoordenadasPolar())
print('[coordB -> Polar]', coordenadaB.FormatarEmCoordenadasPolar())
print('')
```
9. [X] Write classes for the following shapes: square, rectangle, and circle.
```
class Quadrado:
def __init__(self, lado: float):
self.__lado = lado
@property
def lado(self):
return self.__lado
class Retangulo:
def __init__(self, base: float, altura: float):
self.__base = base
self.__altura = altura
@property
def base(self):
return self.__base
@property
def altura(self):
return self.__altura
class Circulo:
def __init__(self, raio: float):
self.__raio = raio
@property
def raio(self):
return self.__raio
```
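The exercise only asks for the class definitions above; as a quick check in the same style as the other test sections, they could be instantiated like this (the side, base, height, and radius values below are arbitrary examples):
```
print('>> Instancias de Formas')
quadrado = Quadrado(4)
retangulo = Retangulo(3, 5)
circulo = Circulo(2.5)
print('[Quadrado] lado: ', quadrado.lado)
print('[Retangulo] base: ', retangulo.base, ' altura: ', retangulo.altura)
print('[Circulo] raio: ', circulo.raio)
print('')
```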
10. [X] Write a Fracao class that stores two integers, a numerator and a denominator. <br/>
a. [X] Implement methods for addition, subtraction, multiplication, and division of two fractions <br/>
b. [X] Implement a method that prints a fraction in the numerator/denominator format <br/>
c. [X] Implement a method that inverts the fraction <br/>
d. [X] Implement a method that returns the fraction as a real value <br/>
e. [X] Implement a method that creates a fraction (numerator/denominator) from a real number <br/>
```
def mmc(num1, num2): # Fonte: http://devfuria.com.br/logica-de-programacao/mmc/
a = num1
b = num2
resto = None
while resto != 0:
resto = a % b
a = b
b = resto
return (num1 * num2) / a
class Fracao:
def __init__(self, numerador: int, denominador: int):
self.__numerador = numerador
self.__denominador = denominador
@property
def numerador(self):
return self.__numerador
@property
def denominador(self):
return self.__denominador
@numerador.setter
def numerador(self, numerador):
self.__numerador = numerador
@denominador.setter
def denominador(self, denominador):
self.__denominador = denominador
def ImprimeFuncao(self):
return f'{self.numerador}/{self.denominador}'
def __OperacaoComumSomaSubtracao(self, fracao, denominador_comum):
return fracao.numerador * (denominador_comum/fracao.denominador)
def InverteFração(self):
[self.numerador, self.denominador] = [self.denominador, self.numerador]
def Soma(self, fracao):
denominador_comum = mmc(self.denominador, fracao.denominador)
resultado_fracao_1 = self.__OperacaoComumSomaSubtracao(self, denominador_comum)
        resultado_fracao_2 = self.__OperacaoComumSomaSubtracao(fracao, denominador_comum)
return f'{(resultado_fracao_1 + resultado_fracao_2)}/{denominador_comum}'
def Subtracao(self, fracao):
denominador_comum = mmc(self.denominador, fracao.denominador)
resultado_fracao_1 = self.__OperacaoComumSomaSubtracao(self, denominador_comum)
resultado_fracao_2 = self.__OperacaoComumSomaSubtracao(fracao, denominador_comum)
return f'{(resultado_fracao_1 - resultado_fracao_2)}/{denominador_comum}'
def Multiplicação(self, fracao):
resultado_numerador = self.numerador * fracao.numerador
resultado_denominador = self.denominador * fracao.denominador
return f'{resultado_numerador}/{resultado_denominador}'
def Divisao(self, fracao):
fracao.InverteFração()
resultado_numerador = self.numerador * fracao.numerador
resultado_denominador = self.denominador * fracao.denominador
return f'{resultado_numerador}/{resultado_denominador}'
def FracaoParaNumeroReal(self):
return self.numerador / self.denominador
def NumeroRealParaFracao(self, numero_real):
return f'{numero_real*5}/{5}'
print('>> Instancias de Fracoes')
fracaoA = Fracao(4, 12)
fracaoB = Fracao(8, 3)
print('[Imprime fracao A]: ', fracaoA.ImprimeFuncao())
print('[Imprime fracao B]: ', fracaoB.ImprimeFuncao())
print('[Soma das fracoes A+B]: ', fracaoA.Soma(fracaoB))
print('[Subtracao das fracoes A-B]: ', fracaoA.Subtracao(fracaoB))
print('[Multiplicacao das fracoes A*B]: ', fracaoA.Multiplicação(fracaoB))
print('[Divisao das fracoes A/B]: ', fracaoA.Divisao(fracaoB))
fracaoA.InverteFração()
print('[Imprime fracao A]: ', fracaoA.ImprimeFuncao())
print('[Numero real da fracao A]: ', fracaoA.FracaoParaNumeroReal())
print('[Numero real 10 em fração]: ', fracaoA.NumeroRealParaFracao(10))
print('')
```
|
github_jupyter
|
class Televisao:
def __init__(self, marca: str, tamanho: int, canal: int, canal_maximo: int = 14, canal_minimo: int = 2):
self.__ligada = False
self.__marca = marca
self.__tamanho = tamanho
self.__canal_maximo = canal_maximo
self.__canal_minimo = self.__VerifyCanalMinimo(canal_minimo)
self.__canal = self.__CheckValorCanal(canal)
@property
def ligada(self):
return self.__ligada
@property
def canal(self):
return self.__canal
@property
def marca(self):
return self.__marca
@property
def tamanho(self):
return self.__tamanho
@property
def canal_maximo(self):
return self.__canal_maximo
@property
def canal_minimo(self):
return self.__canal_minimo
@canal.setter
def canal(self, canal: int):
self.__canal = self.__CheckValorCanal(canal)
@ligada.setter
def ligada(self, ligada: bool):
self.__ligada = ligada
def __CheckValorCanal(self, canal: int):
if canal < self.canal_minimo:
return self.canal_maximo
elif canal > self.canal_maximo:
return self.canal_minimo
else:
return canal
def __VerifyCanalMinimo(self, canal_minimo: int):
if canal_minimo > self.canal_maximo:
print('Erro: Canal minimo maior que canal maximo indicado!')
return 1
else:
return canal_minimo
def MudarCanalParaBaixo(self):
self.canal = self.canal - 1
def MudarCanalParaCima(self):
self.canal = self.canal + 1
print('>> Instancias de Televisão')
televisaoDaJulia = Televisao('Samsung', 55, 1, canal_minimo = 3, canal_maximo = 6)
televisaoDaLeticia = Televisao('LG', 65, 99, canal_maximo = 4, canal_minimo = 5)
print('[Televisao da Julia] marca:', televisaoDaJulia.marca, ' tamanho: ', televisaoDaJulia.tamanho, ' canal: ', televisaoDaJulia.canal)
print('[Televisao da Julia] canal_minimo: ', televisaoDaJulia.canal_minimo, ' canal_maximo: ', televisaoDaJulia.canal_maximo)
print('[Televisao da Leticia] marca:', televisaoDaLeticia.marca, ' tamanho: ', televisaoDaLeticia.tamanho, ' canal: ', televisaoDaLeticia.canal)
print('[Televisao da Leticia] canal_minimo:', televisaoDaLeticia.canal_minimo, ' canal_maximo: ', televisaoDaLeticia.canal_maximo)
televisaoDaJulia.MudarCanalParaBaixo()
televisaoDaLeticia.MudarCanalParaCima()
print('## Mudança de canal ##')
print('[Televisao da Julia] marca:', televisaoDaJulia.marca, ' tamanho: ', televisaoDaJulia.tamanho, ' canal: ', televisaoDaJulia.canal)
print('[Televisao da Leticia] marca:', televisaoDaLeticia.marca, ' tamanho: ', televisaoDaLeticia.tamanho, ' canal: ', televisaoDaLeticia.canal)
print('')
class Cidade:
def __init__(self, nome: str, populacao: int):
self.__nome = nome
self.__populacao = populacao
@property
def nome(self):
return self.__nome
@property
def populacao(self):
return self.__populacao
class Estado:
def __init__(self, nome: str, sigla: str, cidades):
self.__nome = nome
self.__sigla = sigla
self.__cidades = cidades
self.__populacao_total = self.__CountPopulacao()
@property
def nome(self):
return self.__nome
@property
def sigla(self):
return self.__sigla
@property
def cidades(self):
return self.__cidades
@property
def populacao_total(self):
return self.__populacao_total
@populacao_total.setter
def populacao_total(self, populacao_total: int):
self.populacao_total = populacao_total
def __CountPopulacao(self):
pop_total = 0
for cidade in self.cidades:
pop_total += cidade.populacao
return pop_total
print('>> Instancias de Cidade e Estado')
uruguaiana = Cidade('Uruguaiana', 200)
portoAlegre = Cidade('Porto Alegre', 5000)
estado = Estado('Rio Grande do Sul', 'RS', [uruguaiana, portoAlegre])
print('[', uruguaiana.nome, ']: populacao de ', uruguaiana.populacao)
print('[', portoAlegre.nome, ']: populacao de ', portoAlegre.populacao)
print('[', estado.nome, ']: populacao total de ', estado.populacao_total)
print('')
import math
class Coordenada:
def __init__(self, x: float, y: float):
self.__x = x
self.__y = y
@property
def x(self):
return self.__x
@property
def y(self):
return self.__y
def CoordenadaFormatada(self):
return f'({self.x}, {self.y})'
def CalcDistancia(self, outra_coordenada):
__ponto_x = math.pow((outra_coordenada.x - self.x), 2)
__ponto_y = math.pow((outra_coordenada.y - self.y), 2)
return math.sqrt(__ponto_x + __ponto_y)
def CompCoordenadas(self, outraCoordenada):
coordenada_inicial = Coordenada(0, 0)
comp_coordenada = self.CalcDistancia(coordenada_inicial)
comp_outra_coordenada = outraCoordenada.CalcDistancia(coordenada_inicial)
if comp_coordenada > comp_outra_coordenada:
return f'A coordenada ({self.x}, {self.y}) esta mais distante da origem que a coordenada ({outraCoordenada.x}, {outraCoordenada.y})'
elif comp_coordenada < comp_outra_coordenada:
return f'A coordenada ({outraCoordenada.x}, {outraCoordenada.y}) esta mais distante da origem que a coordenada ({self.x}, {self.y})'
else:
return f'Ambas coordenadas [({outraCoordenada.x}, {outraCoordenada.y}), ({self.x}, {self.y})] tem a mesma distancia da origem'
def FormatarEmCoordenadasPolar(self):
Xpow2 = math.pow(self.x, 2)
Ypow2 = math.pow(self.y, 2)
return f'P({math.sqrt(Xpow2+Ypow2)}, {math.atan(self.x/self.y)})'
print('>> Instancias de Coordenadas')
coordenadaA = Coordenada(2, 2)
coordenadaB = Coordenada(3, 3)
print('[Coordenada A]: ', coordenadaA.CoordenadaFormatada())
print('[Coordenada B]: ', coordenadaB.CoordenadaFormatada())
print('[Distancia coordA -> coordB]: ', coordenadaA.CalcDistancia(coordenadaB))
print('[Distancia coordB -> coordA]: ', coordenadaB.CalcDistancia(coordenadaA))
print('[Comparacao da distancia das coordenadas até a origem]: ', coordenadaA.CompCoordenadas(coordenadaB))
print('[coordA -> Polar]', coordenadaA.FormatarEmCoordenadasPolar())
print('[coordB -> Polar]', coordenadaB.FormatarEmCoordenadasPolar())
print('')
class Quadrado:
def __init__(self, lado: float):
self.__lado = lado
@property
def lado(self):
return self.__lado
class Retangulo:
def __init__(self, base: float, altura: float):
self.__base = base
self.__altura = altura
@property
def base(self):
return self.__base
@property
def altura(self):
return self.__altura
class Circulo:
def __init__(self, raio: float):
self.__raio = raio
@property
def raio(self):
return self.__raio
def mmc(num1, num2): # Fonte: http://devfuria.com.br/logica-de-programacao/mmc/
a = num1
b = num2
resto = None
while resto != 0:
resto = a % b
a = b
b = resto
return (num1 * num2) / a
class Fracao:
def __init__(self, numerador: int, denominador: int):
self.__numerador = numerador
self.__denominador = denominador
@property
def numerador(self):
return self.__numerador
@property
def denominador(self):
return self.__denominador
@numerador.setter
def numerador(self, numerador):
self.__numerador = numerador
@denominador.setter
def denominador(self, denominador):
self.__denominador = denominador
def ImprimeFuncao(self):
return f'{self.numerador}/{self.denominador}'
def __OperacaoComumSomaSubtracao(self, fracao, denominador_comum):
return fracao.numerador * (denominador_comum/fracao.denominador)
def InverteFração(self):
[self.numerador, self.denominador] = [self.denominador, self.numerador]
def Soma(self, fracao):
denominador_comum = mmc(self.denominador, fracao.denominador)
resultado_fracao_1 = self.__OperacaoComumSomaSubtracao(self, denominador_comum)
resultado_fracao_2 = self.__OperacaoComumSomaSubtracao(self, denominador_comum)
return f'{(resultado_fracao_1 + resultado_fracao_2)}/{denominador_comum}'
def Subtracao(self, fracao):
denominador_comum = mmc(self.denominador, fracao.denominador)
resultado_fracao_1 = self.__OperacaoComumSomaSubtracao(self, denominador_comum)
resultado_fracao_2 = self.__OperacaoComumSomaSubtracao(fracao, denominador_comum)
return f'{(resultado_fracao_1 - resultado_fracao_2)}/{denominador_comum}'
def Multiplicação(self, fracao):
resultado_numerador = self.numerador * fracao.numerador
resultado_denominador = self.denominador * fracao.denominador
return f'{resultado_numerador}/{resultado_denominador}'
def Divisao(self, fracao):
fracao.InverteFração()
resultado_numerador = self.numerador * fracao.numerador
resultado_denominador = self.denominador * fracao.denominador
return f'{resultado_numerador}/{resultado_denominador}'
def FracaoParaNumeroReal(self):
return self.numerador / self.denominador
def NumeroRealParaFracao(self, numero_real):
return f'{numero_real*5}/{5}'
print('>> Instancias de Fracoes')
fracaoA = Fracao(4, 12)
fracaoB = Fracao(8, 3)
print('[Imprime fracao A]: ', fracaoA.ImprimeFuncao())
print('[Imprime fracao B]: ', fracaoB.ImprimeFuncao())
print('[Soma das fracoes A+B]: ', fracaoA.Soma(fracaoB))
print('[Subtracao das fracoes A-B]: ', fracaoA.Subtracao(fracaoB))
print('[Multiplicacao das fracoes A*B]: ', fracaoA.Multiplicação(fracaoB))
print('[Divisao das fracoes A/B]: ', fracaoA.Divisao(fracaoB))
fracaoA.InverteFração()
print('[Imprime fracao A]: ', fracaoA.ImprimeFuncao())
print('[Numero real da fracao A]: ', fracaoA.FracaoParaNumeroReal())
print('[Numero real 10 em fração]: ', fracaoA.NumeroRealParaFracao(10))
print('')
# Label neighbor filters
In this notebook, we demonstrate how neighbor-based filters work in the context of measurements of cells in tissues. We also determine neighbors of neighbors and extend the radius of such filters.
See also
* [Image Processing Filters for Grids of Cells Analogous to Filters Processing Grids of Pixels](https://www.frontiersin.org/articles/10.3389/fcomp.2021.774396/)
```
import pyclesperanto_prototype as cle
import numpy as np
import matplotlib
from numpy.random import random
cle.select_device("RTX")
# Generate artificial cells as test data
tissue = cle.artificial_tissue_2d()
touch_matrix = cle.generate_touch_matrix(tissue)
cle.imshow(tissue, labels=True)
```
# Associate artificial measurements to the cells
```
centroids = cle.label_centroids_to_pointlist(tissue)
coordinates = cle.pull_zyx(centroids)
values = random([coordinates.shape[1]])
for i, y in enumerate(coordinates[1]):
if (y < 128):
values[i] = values[i] * 10 + 45
else:
values[i] = values[i] * 10 + 90
measurements = cle.push_zyx(np.asarray([values]))
# visualize measurements in space
parametric_image = cle.replace_intensities(tissue, measurements)
cle.imshow(parametric_image, min_display_intensity=0, max_display_intensity=100, color_map='jet')
```
# Local averaging smooths edges
By averaging measurements locally, we can reduce the noise, but we also introduce a stripe where the regions touch.
```
local_mean_measurements = cle.mean_of_touching_neighbors(measurements, touch_matrix)
parametric_image = cle.replace_intensities(tissue, local_mean_measurements)
cle.imshow(parametric_image, min_display_intensity=0, max_display_intensity=100, color_map='jet')
```
# Edge preserving filters: median
By averaging using a median filter, we can also reduce noise while keeping the edge between the regions sharp
```
local_median_measurements = cle.median_of_touching_neighbors(measurements, touch_matrix)
parametric_image = cle.replace_intensities(tissue, local_median_measurements)
cle.imshow(parametric_image, min_display_intensity=0, max_display_intensity=100, color_map='jet')
```
# Increasing filter radius: neighbors of neighbors
In order to increase the radius of the operation, we need to determine neighbors of touching neighbors.
```
neighbor_matrix = cle.neighbors_of_neighbors(touch_matrix)
local_median_measurements = cle.median_of_touching_neighbors(measurements, neighbor_matrix)
parametric_image = cle.replace_intensities(tissue, local_median_measurements)
cle.imshow(parametric_image, min_display_intensity=0, max_display_intensity=100, color_map='jet')
```
## Short-cuts for visualisation only
If you're not so much interested in the vectors of measurements, there are shortcuts, for example for visualizing the mean value of neighboring cells with different radii:
```
# visualize measurements in space
measurement_image = cle.replace_intensities(tissue, measurements)
print('original')
cle.imshow(measurement_image, min_display_intensity=0, max_display_intensity=100, color_map='jet')
for radius in range(0, 5):
print('Radius', radius)
    # note: this function takes a parametric image and the label map, instead of a vector and the touch_matrix used above
parametric_image = cle.mean_of_touching_neighbors_map(measurement_image, tissue, radius=radius)
cle.imshow(parametric_image, min_display_intensity=0, max_display_intensity=100, color_map='jet')
```
<a href="https://colab.research.google.com/github/bkkaggle/jax-dcgan/blob/main/dcgan.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!pip install flax
import requests
import os
if 'TPU_DRIVER_MODE' not in globals():
url = 'http://' + os.environ['COLAB_TPU_ADDR'].split(':')[0] + ':8475/requestversion/tpu_driver_nightly'
resp = requests.post(url)
TPU_DRIVER_MODE = 1
# The following is required to use TPU Driver as JAX's backend.
import os
from jax.config import config
config.FLAGS.jax_xla_backend = "tpu_driver"
config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
print(config.FLAGS.jax_backend_target)
import os
from functools import partial
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import jax
import jax.numpy as jnp
import flax
from flax import linen as nn
import tensorflow as tf
import tensorflow_datasets as tfds
def shard(xs):
return jax.tree_map(
lambda x: x.reshape((jax.device_count(), -1) + x.shape[1:]), xs)
class Generator(nn.Module):
training: bool
@nn.compact
def __call__(self, z):
x = nn.ConvTranspose(features=64*8, kernel_size=(4, 4),
strides=(1, 1), padding='VALID', use_bias=False)(z)
x = nn.BatchNorm(
use_running_average=not self.training, momentum=0.9)(x)
x = nn.relu(x)
x = nn.ConvTranspose(features=64*4, kernel_size=(4, 4),
strides=(2, 2), padding='SAME', use_bias=False)(x)
x = nn.BatchNorm(
use_running_average=not self.training, momentum=0.9)(x)
x = nn.relu(x)
x = nn.ConvTranspose(features=64*2, kernel_size=(4, 4),
strides=(2, 2), padding='SAME', use_bias=False)(x)
x = nn.BatchNorm(
use_running_average=not self.training, momentum=0.9)(x)
x = nn.relu(x)
x = nn.ConvTranspose(features=64, kernel_size=(
4, 4), strides=(2, 2), padding='SAME', use_bias=False)(x)
x = nn.BatchNorm(
use_running_average=not self.training, momentum=0.9)(x)
x = nn.relu(x)
x = nn.ConvTranspose(features=1, kernel_size=(
4, 4), strides=(1, 1), padding='SAME', use_bias=False)(x)
return jnp.tanh(x)
class Discriminator(nn.Module):
training: bool
@nn.compact
def __call__(self, x):
x = nn.Conv(features=64, kernel_size=(
4, 4), strides=(2, 2), padding='SAME', use_bias=False)(x)
x = nn.leaky_relu(x, negative_slope=0.2)
x = nn.Conv(features=64*2, kernel_size=(4, 4),
strides=(2, 2), padding='SAME', use_bias=False)(x)
x = nn.BatchNorm(
use_running_average=not self.training, momentum=0.9)(x)
x = nn.leaky_relu(x, negative_slope=0.2)
x = nn.Conv(features=64*4, kernel_size=(4, 4),
strides=(2, 2), padding='SAME', use_bias=False)(x)
x = nn.BatchNorm(
use_running_average=not self.training, momentum=0.9)(x)
x = nn.leaky_relu(x, negative_slope=0.2)
x = nn.Conv(features=64*8, kernel_size=(4, 4),
strides=(2, 2), padding='SAME', use_bias=False)(x)
x = nn.BatchNorm(
use_running_average=not self.training, momentum=0.9)(x)
x = nn.leaky_relu(x, negative_slope=0.2)
x = nn.Conv(features=1, kernel_size=(
1, 1), strides=(4, 4), padding='VALID', use_bias=False)(x)
x = jnp.reshape(x, [x.shape[0], -1])
return x
@jax.vmap
def bce_logits_loss(logit, label):
return jnp.maximum(logit, 0) - logit * label + jnp.log(1 + jnp.exp(-jnp.abs(logit)))
def loss_g(params_g, params_d, batch, rng, variables_g, variables_d):
z = jax.random.normal(rng, shape=(batch.shape[0], 1, 1, 100))
fake_batch, variables_g = Generator(training=True).apply(
{'params': params_g, 'batch_stats': variables_g['batch_stats']}, z, mutable=['batch_stats'])
fake_logits, variables_d = Discriminator(training=True).apply(
{'params': params_d, 'batch_stats': variables_d['batch_stats']}, fake_batch, mutable=['batch_stats'])
real_labels = jnp.ones((batch.shape[0],), dtype=jnp.int32)
return jnp.mean(bce_logits_loss(fake_logits, real_labels)), (variables_g, variables_d)
def loss_d(params_d, params_g, batch, rng, variables_g, variables_d):
z = jax.random.normal(rng, shape=(batch.shape[0], 1, 1, 100))
fake_batch, variables_g = Generator(training=True).apply(
{'params': params_g, 'batch_stats': variables_g['batch_stats']}, z, mutable=['batch_stats'])
real_logits, variables_d = Discriminator(training=True).apply(
{'params': params_d, 'batch_stats': variables_d['batch_stats']}, batch, mutable=['batch_stats'])
fake_logits, variables_d = Discriminator(training=True).apply(
{'params': params_d, 'batch_stats': variables_d['batch_stats']}, fake_batch, mutable=['batch_stats'])
real_labels = jnp.ones((batch.shape[0],), dtype=jnp.int32)
real_loss = bce_logits_loss(real_logits, real_labels)
fake_labels = jnp.zeros((batch.shape[0],), dtype=jnp.int32)
fake_loss = bce_logits_loss(fake_logits, fake_labels)
return jnp.mean(real_loss + fake_loss), (variables_g, variables_d)
@partial(jax.pmap, axis_name='batch')
def train_step(rng, variables_g, variables_d, optimizer_g, optimizer_d, batch):
rng, rng_g, rng_d = jax.random.split(rng, 3)
(g_loss, (variables_g, variables_d)), grad_g = jax.value_and_grad(loss_g, has_aux=True)(
optimizer_g.target, optimizer_d.target, batch, rng_g, variables_g, variables_d)
g_loss = jax.lax.pmean(g_loss, axis_name='batch')
grad_g = jax.lax.pmean(grad_g, axis_name='batch')
optimizer_g = optimizer_g.apply_gradient(grad_g)
(d_loss, (variables_g, variables_d)), grad_d = jax.value_and_grad(loss_d, has_aux=True)(
optimizer_d.target, optimizer_g.target, batch, rng_d, variables_g, variables_d)
d_loss = jax.lax.pmean(d_loss, axis_name='batch')
grad_d = jax.lax.pmean(grad_d, axis_name='batch')
optimizer_d = optimizer_d.apply_gradient(grad_d)
return rng, variables_g, variables_d, optimizer_g, optimizer_d, d_loss, g_loss
def make_dataset(batch_size, seed=1):
mnist = tfds.load("mnist")
def _preprocess(sample):
image = tf.image.convert_image_dtype(sample["image"], tf.float32)
image = tf.image.resize(image, (32, 32))
return 2.0 * image - 1.0
ds = mnist["train"]
ds = ds.map(map_func=_preprocess,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds = ds.shuffle(10 * batch_size, seed=seed).repeat().batch(batch_size)
return iter(tfds.as_numpy(ds))
def main():
dataset = make_dataset(batch_size=256)
rng = jax.random.PRNGKey(42)
rng, rng_g, rng_d = jax.random.split(rng, 3)
init_batch_g = jnp.ones((1, 1, 1, 100), jnp.float32)
variables_g = Generator(training=True).init(rng_g, init_batch_g)
init_batch_d = jnp.ones((1, 32, 32, 1), jnp.float32)
variables_d = Discriminator(training=True).init(rng_d, init_batch_d)
optimizer_g = flax.optim.Adam(
learning_rate=1e-4, beta1=0.5, beta2=0.9).create(variables_g["params"])
optimizer_g = flax.jax_utils.replicate(optimizer_g)
optimizer_d = flax.optim.Adam(
learning_rate=1e-4, beta1=0.5, beta2=0.9).create(variables_d["params"])
optimizer_d = flax.jax_utils.replicate(optimizer_d)
variables_g = flax.jax_utils.replicate(variables_g)
variables_d = flax.jax_utils.replicate(variables_d)
rngs = jax.random.split(rng, num=jax.local_device_count())
g_losses = []
d_losses = []
for i in tqdm(range(2000)):
img_a = shard(next(dataset))
rngs, variables_g, variables_d, optimizer_g, optimizer_d, d_loss, g_loss = train_step(
rngs, variables_g, variables_d, optimizer_g, optimizer_d, img_a)
if i % 10 == 0:
g_losses.append(float(jnp.mean(g_loss)))
d_losses.append(float(jnp.mean(d_loss)))
if i % 500 == 0:
rng, rng_sample = jax.random.split(rng)
z = jax.random.normal(rng_sample, shape=(1, 1, 1, 100))
temp_params_g = flax.jax_utils.unreplicate(
optimizer_g.target)
temp_variables_g = flax.jax_utils.unreplicate(variables_g)
samples = Generator(training=False).apply(
{'params': temp_params_g, 'batch_stats': temp_variables_g['batch_stats']}, z, mutable=False)
img = jnp.reshape((samples + 1) / 2, [32, 32])
plt.imshow(img, cmap='gray')
plt.show()
return g_losses, d_losses
print(jax.devices())
g_losses, d_losses = main()
plt.plot(g_losses, label='g_loss')
plt.plot(d_losses, label='d_loss')
plt.legend()
plt.show()
```
# Predict survival on the Titanic
This is a simple example and starting point for neural networks with TensorFlow.
We create a feed-forward neural network with two hidden layers (128 and 256 nodes)
and sigmoid units.
The test accuracy is around 78.5 % - which is not too bad for such a simple model.
Credits: https://www.kaggle.com/hbaderts/simple-feed-forward-neural-network-with-tensorflow/data
```
import numpy as np
import pandas as pd # For loading and processing the dataset
import tensorflow as tf # Of course, we need TensorFlow.
from sklearn.model_selection import train_test_split
```
## Reading and cleaning the input data
We first read the CSV input file using Pandas.
Next, we remove irrelevant entries, and prepare the data for our neural network.

```
# Read the CSV input file and show first 5 rows
df_train = pd.read_csv('dataset/titanic_dataset/train.csv')
df_train.head(5)
# We can't do anything with the Name, Ticket number, and Cabin, so we drop them.
df_train = df_train.drop(['PassengerId','Name','Ticket', 'Cabin'], axis=1)
# To make 'Sex' numeric, we replace 'female' by 0 and 'male' by 1
df_train['Sex'] = df_train['Sex'].map({'female':0, 'male':1}).astype(int)
# We replace 'Embarked' by three dummy variables 'Embarked_S', 'Embarked_C', and 'Embarked Q',
# which are 1 if the person embarked there, and 0 otherwise.
df_train = pd.concat([df_train, pd.get_dummies(df_train['Embarked'], prefix='Embarked')], axis=1)
df_train = df_train.drop('Embarked', axis=1)
# We normalize the age and the fare by subtracting their mean and dividing by the standard deviation
age_mean = df_train['Age'].mean()
age_std = df_train['Age'].std()
df_train['Age'] = (df_train['Age'] - age_mean) / age_std
fare_mean = df_train['Fare'].mean()
fare_std = df_train['Fare'].std()
df_train['Fare'] = (df_train['Fare'] - fare_mean) / fare_std
# In many cases, the 'Age' is missing - which can cause problems. Let's look how bad it is:
print("Number of missing 'Age' values: {:d}".format(df_train['Age'].isnull().sum()))
# A simple method to handle these missing values is to replace them by the mean age.
df_train['Age'] = df_train['Age'].fillna(df_train['Age'].mean())
# With that, we're almost ready for training
df_train.head()
# Finally, we convert the Pandas dataframe to a NumPy array, and split it into a training and test set
X_train = df_train.drop('Survived', axis=1).values
y_train = df_train['Survived'].values
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2)
# We'll build a classifier with two classes: "survived" and "didn't survive",
# so we create the corresponding labels
# This is taken from https://www.kaggle.com/klepacz/titanic/tensor-flow
labels_train = (np.arange(2) == y_train[:,None]).astype(np.float32)
labels_test = (np.arange(2) == y_test[:,None]).astype(np.float32)
labels_train
```
## Define TensorFlow model
In a first step, we define how our neural network will look.
We create a network with 2 hidden layers with sigmoid activations, and an output layer to which a softmax is applied for prediction.
No dropout or other explicit regularization is used in this simple model.
```
inputs = tf.placeholder(tf.float32, shape=(None, X_train.shape[1]), name='inputs')
label = tf.placeholder(tf.float32, shape=(None, 2), name='labels')
# First layer
hid1_size = 128
# Weight for the first layer
w1 = tf.Variable(tf.random_normal([hid1_size, X_train.shape[1]]))
# Bias
b1 = tf.Variable(tf.constant(0.1, shape=(hid1_size, 1)))
# Output = sigmoid(w1 * inputs + b1)
y1 = tf.nn.sigmoid(tf.add(tf.matmul(w1, tf.transpose(inputs)), b1))
# Second layer
hid2_size = 256
w2 = tf.Variable(tf.random_normal([hid2_size, hid1_size]))
b2 = tf.Variable(tf.constant(0.1, shape=(hid2_size, 1)))
y2 = tf.nn.sigmoid(tf.add(tf.matmul(w2, y1), b2))
# Output layer
wo = tf.Variable(tf.random_normal([2, hid2_size]))
bo = tf.Variable(tf.random_normal([2,1]))
yo = tf.transpose(tf.add(tf.matmul(wo, y2), bo))
# X_train.shape[1] == Number of features == 9
X_train.shape[1]
```
The network produces a softmax output, and we train it with the cross-entropy loss.
We further define functions which calculate the predicted label, and the accuracy of the network.
```
# Loss function and optimizer
learning_rate = tf.placeholder(tf.float32, shape=())
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=yo, labels=label))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# Prediction
pred = tf.nn.softmax(yo)
pred_label = tf.argmax(pred, 1)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
```
## Train the network!
Finally, we are ready to train our network. Let's initialize TensorFlow and start training.
```
# Create operation which will initialize all variables
init = tf.global_variables_initializer()
# Configure GPU not to use all memory
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Start a new tensorflow session and initialize variables
sess = tf.InteractiveSession(config=config)
sess.run(init)
# This is the main training loop: we train for 50 epochs with a learning rate of 0.05 and another
# 50 epochs with a smaller learning rate of 0.01
for learning_rate_1 in [0.05, 0.01]:
for epoch in range(50):
avg_cost = 0.0
# For each epoch, we go through all the samples we have.
for i in range(X_train.shape[0]):
            # Finally, this is where the magic happens: run our optimizer, feeding the current example into the `inputs` placeholder and the current target into `label`
_,c = sess.run([optimizer,loss], feed_dict={learning_rate:learning_rate_1, inputs:X_train[i, None], label: labels_train[i, None]})
avg_cost += c
avg_cost /= X_train.shape[0]
        # Print the cost in this epoch to the console.
if epoch % 10 == 0:
print("Epoch: {:3d} Train Cost: {:.4f}".format(epoch, avg_cost))
```
We calculate the accuracy on our training set, and (more importantly) our test set.
```
acc_train = accuracy.eval(feed_dict={inputs: X_train, label: labels_train})
print("Train accuracy: {:3.2f}%".format(acc_train*100.0))
acc_test = accuracy.eval(feed_dict={inputs: X_test, label: labels_test})
print("Test accuracy: {:3.2f}%".format(acc_test*100.0))
```
## Predict new passengers
If we're happy with these results, we load the test dataset, and do all pre-processing steps we also did for the training set.
```
df_test = pd.read_csv('dataset/titanic_dataset/test.csv')
df_test.head()
# Do all pre-processing steps as above
df_test = df_test.drop(['Name', 'Ticket', 'Cabin'], axis=1)
df_test['Sex'] = df_test['Sex'].map({'female':0, 'male':1}).astype(int)
df_test = pd.concat([df_test, pd.get_dummies(df_test['Embarked'], prefix='Embarked')], axis=1)
df_test = df_test.drop('Embarked', axis=1)
df_test['Age'] = (df_test['Age'] - age_mean) / age_std
df_test['Fare'] = (df_test['Fare'] - fare_mean) / fare_std
df_test.head()
X_test = df_test.drop('PassengerId', axis=1).values
```
Then we predict the label of all our test data
```
# Predict
for i in range(X_test.shape[0]):
df_test.loc[i, 'Survived'] = sess.run(pred_label, feed_dict={inputs: X_test[i, None]}).squeeze()
df_test
# Important: close the TensorFlow session, now that we're finished.
sess.close()
```
## Can you do mini batch training?
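Yes. One way is to reuse the placeholders, loss and optimizer defined above and feed slices of several rows at a time instead of a single example. The sketch below is only illustrative: it assumes the graph, `X_train`, `labels_train` and an open `sess` from earlier in the notebook (i.e. it would run before `sess.close()`), and the batch size of 32 is an arbitrary choice.
```
batch_size = 32
num_batches = int(np.ceil(X_train.shape[0] / batch_size))

for learning_rate_1 in [0.05, 0.01]:
    for epoch in range(50):
        # Shuffle once per epoch so the batches differ between epochs.
        perm = np.random.permutation(X_train.shape[0])
        X_shuffled, labels_shuffled = X_train[perm], labels_train[perm]

        avg_cost = 0.0
        for b in range(num_batches):
            start, end = b * batch_size, (b + 1) * batch_size
            # Feed a whole mini-batch through the same placeholders.
            _, c = sess.run([optimizer, loss],
                            feed_dict={learning_rate: learning_rate_1,
                                       inputs: X_shuffled[start:end],
                                       label: labels_shuffled[start:end]})
            avg_cost += c / num_batches
        if epoch % 10 == 0:
            print("Epoch: {:3d} Train Cost: {:.4f}".format(epoch, avg_cost))
```
Because the loss already averages over the batch with `tf.reduce_mean`, no changes to the graph itself are needed; only the feeding loop changes, and each epoch now needs far fewer `sess.run` calls.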
## WaveNet training
Here we train WaveNet to classify the MIT-BIH dataset, using hyperparameters drawn from `wavenet_hyperopt`.
```
import datetime
import os
import logging
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import regularizers
import tools.train as train
import tools.models as models
import tools.plot as plot
# Suppress tensorflow warnings about internal deprecations
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
# Tensorboard logging
rightnow = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
nofftpath = os.path.join("..", "logs", rightnow, "nofft")
## Count data
files = ("../data/mitbih_train.csv", "../data/mitbih_test.csv")
inputs, labels, sparse_labels, df = train.preprocess(*files, fft=False)
# Add a dimension for "channels"
for key in inputs:
inputs[key] = tf.expand_dims(inputs[key], axis=2)
train.class_count(df)
largest_class_count = df["train"].groupby("Classes").size().max()
class_weights = np.divide(largest_class_count, df["train"].groupby("Classes").size().to_numpy())
print("Weighting the classes:", class_weights)
# Hyperparameters
nblocks = 12
nfilters = 94
batch_size = 500
l1_reg = 0.00020208366862898815
l2_reg = 0.0268783897138795
dilation_limit = inputs["train"].shape[1]
# Start
layerlist_res = [("conv", {"filters": nfilters, "width": 1, "padding": "causal"})]
# Residual blocks
models.add_res_blocks(nblocks, nfilters, dilation_limit, layerlist_res)
# End
layerlist_res.extend([
(layers.Activation("relu"),),
("conv", {"filters": nfilters, "width": 1, "padding": "causal"}),
("conv", {"filters": nfilters, "width": 1, "padding": "causal", "activation": "linear"}),
])
config = {
"optimizer": "Nadam",
"loss": "sparse_categorical_crossentropy",
"class_weights": class_weights,
"batch_size": batch_size,
"val_split": 0.05,
"epochs": 300,
"verbose": 0,
"patience": 50,
"metrics": ["accuracy"],
"regularizer": regularizers.l1_l2(l1=l1_reg, l2=l2_reg),
"logdir": nofftpath,
}
inputsize = inputs["train"].shape[1]
ncategories = labels["train"].shape[1]
model_res = models.create_conv1d(inputsize, layerlist_res, ncategories, config)
history = train.train_print(model_res, inputs, sparse_labels, config)
```
Let's see how the model did.
```
plot.plot_fit_history(history)
test_pred = np.argmax(model_res.predict(inputs["test"]), axis=1)
plot.plot_cm(
sparse_labels["test"],
test_pred,
classes=np.array(["N", "S", "V", "F", "Q"]),
normalize=True,
norm_fmt=".3f",
)
```
Let's save the model with its architecture and weights.
```
os.makedirs(os.path.join("..", "models", rightnow, "nofft"), exist_ok=True)
model_res.save(os.path.join("..", "models", rightnow, "nofft", "wavenet.h5"))
```
# Using submodels in PyBaMM
In this notebook we show how to modify existing models by swapping out submodels, and how to build your own model from scratch using existing submodels. To see all of the models and submodels available in PyBaMM, please take a look at the documentation [here](https://pybamm.readthedocs.io/en/latest/source/models/index.html).
## Changing a submodel in an existing battery model
PyBaMM is designed to be a flexible modelling package that allows users to easily compare different models and numerical techniques within a common framework. Battery models within PyBaMM are built up using a number of submodels that describe different physics included within the model, such as mass conservation in the electrolyte or charge conservation in the solid. For ease of use, a number of popular battery models are pre-configured in PyBaMM. As an example, we look at the Single Particle Model (for more information see [here](./models/SPM.ipynb)).
First we import pybamm
```
%pip install pybamm -q # install PyBaMM if it is not installed
import pybamm
```
Then we load the SPM
```
model = pybamm.lithium_ion.SPM()
```
We can look at the submodels that make up the SPM by accessing `model.submodels`, which is a dictionary mapping each submodel's name (i.e. the physics it represents) to the submodel that is selected
```
for name, submodel in model.submodels.items():
print(name, submodel)
```
When you load a model in PyBaMM it builds by default. Building the model sets all of the model variables and sets up any variables which are coupled between different submodels: this is the process which couples the submodels together and allows one submodel to access variables from another. If you would like to swap out a submodel in an existing battery model you need to load it without building it by passing the keyword `build=False`
```
model = pybamm.lithium_ion.SPM(build=False)
```
This collects all of the submodels which make up the SPM, but doesn't build the model. Now you are free to swap out one submodel for another. For instance, you may want to assume that diffusion within the negative particles is infinitely fast, so that the PDE describing diffusion is replaced with an ODE for the uniform particle concentration. To change a submodel you simply update the dictionary entry, in this case to the `PolynomialSingleParticle` submodel
```
model.submodels["negative particle"] = pybamm.particle.PolynomialSingleParticle(model.param, "Negative","uniform profile")
```
where we pass in the model parameters, the electrode (negative or positive) the submodel corresponds to, and the name of the polynomial we want to use. In the example we assume uniform concentration within the particle, corresponding to a zero-order polynomial.
Now if we look at the submodels again we see that the model for the negative particle has been changed
```
for name, submodel in model.submodels.items():
print(name, submodel)
```
Building the model also sets up the equations, boundary and initial conditions for the model. For example, if we look at `model.rhs` before building we see that it is empty
```
model.rhs
```
If we try to use this empty model, PyBaMM will give an error. So, before proceeding we must build the model
```
model.build_model()
```
Now if we look at `model.rhs` we see that it contains an entry relating to the concentration in each particle, as expected for the SPM
```
model.rhs
```
Now the model can be used in a simulation and solved in the usual way, and we still have access to model defaults such as the default geometry and default spatial methods which are used in the simulation
```
simulation = pybamm.Simulation(model)
simulation.solve([0, 3600])
simulation.plot()
```
## Building a custom model from submodels
Instead of editing a pre-existing model, you may wish to build your own model from scratch by combining existing submodels of your choice. In this section, we build a Single Particle Model in which the diffusion is assumed infinitely fast in both particles.
To begin, we load a base lithium-ion model. This sets up the basic model structure behind the scenes, and also sets the default parameters to be those corresponding to a lithium-ion battery. Note that the base model does not select any default submodels, so there is no need to pass `build=False`.
```
model = pybamm.lithium_ion.BaseModel()
```
Submodels can be added to the `model.submodels` dictionary in the same way that we changed the submodels earlier.
We use the simplest model for the external circuit, which is the "current control" submodel
```
model.submodels["external circuit"] = pybamm.external_circuit.CurrentControl(model.param)
```
We want to build a 1D model, so select the `Uniform` current collector model (if the current collectors are behaving uniformly, then a 1D model is appropriate). We also want the model to be isothermal, so select the thermal model accordingly. Further, we assume that the porosity and active material are constant in space and time.
```
model.submodels["current collector"] = pybamm.current_collector.Uniform(model.param)
model.submodels["thermal"] = pybamm.thermal.isothermal.Isothermal(model.param)
model.submodels["porosity"] = pybamm.porosity.Constant(model.param)
model.submodels["negative active material"] = pybamm.active_material.Constant(
model.param, "Negative", model.options
)
model.submodels["positive active material"] = pybamm.active_material.Constant(
model.param, "Positive", model.options
)
```
We assume that the current density varies linearly in the electrodes. This corresponds to the leading-order terms in Ohm's law in the limit in which the SPM is derived in [[3]](#References)
```
model.submodels["negative electrode potentials"] = pybamm.electrode.ohm.LeadingOrder(
model.param, "Negative"
)
model.submodels["positive electrode potentials"] = pybamm.electrode.ohm.LeadingOrder(
model.param, "Positive"
)
```
We assume uniform concentration in both the negative and positive particles
```
model.submodels["negative particle"] = pybamm.particle.PolynomialSingleParticle(
model.param, "Negative", "uniform profile"
)
model.submodels["positive particle"] = pybamm.particle.PolynomialSingleParticle(
model.param, "Positive", "uniform profile"
)
```
In the Single Particle Model, the overpotential can be obtained by inverting the Butler-Volmer relation, so we choose the `InverseButlerVolmer` submodel for the interface, with the "main" lithium-ion reaction. Because of how the current is implemented, we also need to separately specify the `CurrentForInverseButlerVolmer` submodel
```
model.submodels[
"negative interface"
] = pybamm.interface.InverseButlerVolmer(model.param, "Negative", "lithium-ion main")
model.submodels[
"positive interface"
] = pybamm.interface.InverseButlerVolmer(model.param, "Positive", "lithium-ion main")
model.submodels[
"negative interface current"
] = pybamm.interface.CurrentForInverseButlerVolmer(
model.param, "Negative", "lithium-ion main"
)
model.submodels[
"positive interface current"
] = pybamm.interface.CurrentForInverseButlerVolmer(
model.param, "Positive", "lithium-ion main"
)
```
We don't want any SEI formation or lithium plating in this model
```
model.submodels["negative sei"] = pybamm.sei.NoSEI(model.param, "Negative")
model.submodels["positive sei"] = pybamm.sei.NoSEI(model.param, "Positive")
model.submodels["negative lithium plating"] = pybamm.lithium_plating.NoPlating(model.param, "Negative")
model.submodels["positive lithium plating"] = pybamm.lithium_plating.NoPlating(model.param, "Positive")
```
Finally, for the electrolyte we assume that diffusion is infinitely fast so that the concentration is uniform, and also use the leading-order model for charge conservation, which leads to a linear variation in ionic current in the electrodes
```
model.submodels["electrolyte diffusion"] = pybamm.electrolyte_diffusion.ConstantConcentration(
model.param
)
model.submodels["electrolyte conductivity"] = pybamm.electrolyte_conductivity.LeadingOrder(
model.param
)
```
Now that we have set all of the submodels we can build the model
```
model.build_model()
```
We can then use the model in a simulation in the usual way
```
simulation = pybamm.Simulation(model)
simulation.solve([0, 3600])
simulation.plot()
```
## References
The relevant papers for this notebook are:
```
pybamm.print_citations()
```
# MNIST with SciKit-Learn and skorch
This notebooks shows how to define and train a simple Neural-Network with PyTorch and use it via skorch with SciKit-Learn.
<table align="left"><td>
<a target="_blank" href="https://colab.research.google.com/github/skorch-dev/skorch/blob/master/notebooks/MNIST.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td><td>
<a target="_blank" href="https://github.com/skorch-dev/skorch/blob/master/notebooks/MNIST.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a></td></table>
**Note**: If you are running this in [a colab notebook](https://colab.research.google.com/github/skorch-dev/skorch/blob/master/notebooks/MNIST.ipynb), we recommend you enable a free GPU by going:
> **Runtime** → **Change runtime type** → **Hardware Accelerator: GPU**
If you are running in colab, you should install the dependencies and download the dataset by running the following cell:
```
! [ ! -z "$COLAB_GPU" ] && pip install torch scikit-learn==0.20.* skorch
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
```
## Loading Data
Using SciKit-Learn's ```fetch_openml``` to load MNIST data.
```
mnist = fetch_openml('mnist_784', cache=False)
mnist.data.shape
```
## Preprocessing Data
Each image of the MNIST dataset is encoded in a 784 dimensional vector, representing a 28 x 28 pixel image. Each pixel has a value between 0 and 255, corresponding to the grey-value of a pixel.<br />
The above ```fetch_openml``` method to load MNIST returns ```data``` and ```target``` as ```uint8``` which we convert to ```float32``` and ```int64``` respectively.
```
X = mnist.data.astype('float32')
y = mnist.target.astype('int64')
```
To avoid big weights that deal with the pixel values from between [0, 255], we scale `X` down. A commonly used range is [0, 1].
```
X /= 255.0
X.min(), X.max()
```
Note: data is not normalized.
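If you did want zero-mean, unit-variance features rather than the simple [0, 1] scaling above, one option is scikit-learn's `StandardScaler`. This is only an illustrative sketch and is not used in the rest of the notebook; `X_standardized` is a throwaway name introduced here for demonstration.
```
from sklearn.preprocessing import StandardScaler

# In a real workflow, fit on the training portion only to avoid leaking
# test-set statistics; here we simply show the call on X for illustration.
scaler = StandardScaler()
X_standardized = scaler.fit_transform(X)
X_standardized.mean(), X_standardized.std()
```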
```
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
assert(X_train.shape[0] + X_test.shape[0] == mnist.data.shape[0])
X_train.shape, y_train.shape
```
### Print a selection of training images and their labels
```
def plot_example(X, y):
"""Plot the first 5 images and their labels in a row."""
for i, (img, y) in enumerate(zip(X[:5].reshape(5, 28, 28), y[:5])):
plt.subplot(151 + i)
plt.imshow(img)
plt.xticks([])
plt.yticks([])
plt.title(y)
plot_example(X_train, y_train)
```
## Build Neural Network with PyTorch
Simple, fully connected neural network with one hidden layer. Input layer has 784 dimensions (28x28), hidden layer has 98 (= 784 / 8) and output layer 10 neurons, representing digits 0 - 9.
```
import torch
from torch import nn
import torch.nn.functional as F
device = 'cuda' if torch.cuda.is_available() else 'cpu'
mnist_dim = X.shape[1]
hidden_dim = int(mnist_dim/8)
output_dim = len(np.unique(mnist.target))
mnist_dim, hidden_dim, output_dim
```
A Neural network in PyTorch's framework.
```
class ClassifierModule(nn.Module):
def __init__(
self,
input_dim=mnist_dim,
hidden_dim=hidden_dim,
output_dim=output_dim,
dropout=0.5,
):
super(ClassifierModule, self).__init__()
self.dropout = nn.Dropout(dropout)
self.hidden = nn.Linear(input_dim, hidden_dim)
self.output = nn.Linear(hidden_dim, output_dim)
def forward(self, X, **kwargs):
X = F.relu(self.hidden(X))
X = self.dropout(X)
X = F.softmax(self.output(X), dim=-1)
return X
```
skorch allows us to use PyTorch networks in the SciKit-Learn setting:
```
from skorch import NeuralNetClassifier
torch.manual_seed(0)
net = NeuralNetClassifier(
ClassifierModule,
max_epochs=20,
lr=0.1,
device=device,
)
net.fit(X_train, y_train);
```
## Prediction
```
from sklearn.metrics import accuracy_score
y_pred = net.predict(X_test)
accuracy_score(y_test, y_pred)
```
An accuracy of about 96% for a network with only one hidden layer is not too bad.
Let's take a look at some predictions that went wrong:
```
error_mask = y_pred != y_test
plot_example(X_test[error_mask], y_pred[error_mask])
```
# Convolutional Network
PyTorch expects a 4 dimensional tensor as input for its 2D convolution layer. The dimensions represent:
* Batch size
* Number of channels
* Height
* Width
The batch dimension holds the number of examples. MNIST data has only one channel. As stated above, each MNIST vector represents a 28x28 pixel image. Hence, the resulting shape for the PyTorch tensor needs to be (x, 1, 28, 28).
```
XCnn = X.reshape(-1, 1, 28, 28)
XCnn.shape
XCnn_train, XCnn_test, y_train, y_test = train_test_split(XCnn, y, test_size=0.25, random_state=42)
XCnn_train.shape, y_train.shape
class Cnn(nn.Module):
def __init__(self, dropout=0.5):
super(Cnn, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.conv2_drop = nn.Dropout2d(p=dropout)
self.fc1 = nn.Linear(1600, 100) # 1600 = number channels * width * height
self.fc2 = nn.Linear(100, 10)
self.fc1_drop = nn.Dropout(p=dropout)
def forward(self, x):
x = torch.relu(F.max_pool2d(self.conv1(x), 2))
x = torch.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
# flatten over channel, height and width = 1600
x = x.view(-1, x.size(1) * x.size(2) * x.size(3))
x = torch.relu(self.fc1_drop(self.fc1(x)))
x = torch.softmax(self.fc2(x), dim=-1)
return x
torch.manual_seed(0)
cnn = NeuralNetClassifier(
Cnn,
max_epochs=10,
lr=0.002,
optimizer=torch.optim.Adam,
device=device,
)
cnn.fit(XCnn_train, y_train);
y_pred_cnn = cnn.predict(XCnn_test)
accuracy_score(y_test, y_pred_cnn)
```
An accuracy of >98% should suffice for this example!
Let's see how we fare on the examples that went wrong before:
```
accuracy_score(y_test[error_mask], y_pred_cnn[error_mask])
```
Over 70% of the previously misclassified images are now correctly identified.
```
plot_example(X_test[error_mask], y_pred_cnn[error_mask])
```
# The BSSN Formulation of General Relativity in Generic Curvilinear Coordinates: An Overview
## Author: Zach Etienne
## This tutorial notebook demonstrates how Einstein's equations of general relativity in this formulation are constructed and output within NRPy+.
### As Einstein's equations in this formalism take the form of highly nonlinear, coupled *wave equations*, the [tutorial notebook on the scalar wave equation in curvilinear coordinates](Tutorial-ScalarWaveCurvilinear.ipynb) is *required* reading before beginning this module. That module, as well as its own prerequisite [module on reference metrics within NRPy+](Tutorial-Reference_Metric.ipynb) provides the needed overview of how NRPy+ handles reference metrics.
## Introduction:
NRPy+'s original purpose was to be an easy-to-use code capable of generating Einstein's equations in a broad class of [singular](https://en.wikipedia.org/wiki/Coordinate_singularity), curvilinear coordinate systems, where the user need only input the scale factors of the underlying reference metric. Upon generating these equations, NRPy+ would then leverage SymPy's [common-expression-elimination (CSE)](https://en.wikipedia.org/wiki/Common_subexpression_elimination) and C code generation routines, coupled to its own [single-instruction, multiple-data (SIMD)](https://en.wikipedia.org/wiki/SIMD) functions, to generate highly-optimized C code.
### Background Reading/Lectures:
* Mathematical foundations of BSSN and 3+1 initial value problem decompositions of Einstein's equations:
* [Thomas Baumgarte's lectures on mathematical formulation of numerical relativity](https://www.youtube.com/watch?v=t3uo2R-yu4o&list=PLRVOWML3TL_djTd_nsTlq5aJjJET42Qke)
* [Yuichiro Sekiguchi's introduction to BSSN](http://www2.yukawa.kyoto-u.ac.jp/~yuichiro.sekiguchi/3+1.pdf)
* Extensions to the standard BSSN approach used in NRPy+
* [Brown's covariant "Lagrangian" formalism of BSSN](https://arxiv.org/abs/0902.3652)
* [BSSN in spherical coordinates, using the reference-metric approach of Baumgarte, Montero, Cordero-Carrión, and Müller (2012)](https://arxiv.org/abs/1211.6632)
* [BSSN in generic curvilinear coordinates, using the extended reference-metric approach of Ruchlin, Etienne, and Baumgarte (2018)](https://arxiv.org/abs/1712.07658)
### A Note on Notation:
As is standard in NRPy+,
* Greek indices refer to four-dimensional quantities, where the zeroth component indicates the temporal (time) component.
* Latin indices refer to three-dimensional quantities. This is somewhat counterintuitive since Python always indexes its lists starting from 0. As a result, the zeroth component of three-dimensional quantities will necessarily indicate the first *spatial* direction.
As a corollary, any expressions involving mixed Greek and Latin indices will need to offset one set of indices by one: A Latin index in a four-vector will be incremented and a Greek index in a three-vector will be decremented (however, the latter case does not occur in this tutorial notebook).
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This module lays out the mathematical foundation for the BSSN formulation of Einstein's equations, as detailed in the references in the above Background Reading/Lectures section. It is meant to provide an overview of the basic equations and point of reference for **full tutorial notebooks** linked below:
1. [Step 1](#brownslagrangebssn): [Brown](https://arxiv.org/abs/0902.3652)'s covariant formulation of the BSSN time-evolution equations (see next section for gauge conditions)
1. [Step 1.a](#fullequations): Numerical implementation of BSSN time-evolution equations
1. [Step 1.a.i](#liederivs) ([**BSSN quantities module [start here]**](Tutorial-BSSN_quantities.ipynb); [**BSSN time-evolution module**](Tutorial-BSSN_time_evolution-BSSN_RHSs.ipynb)): Expanding the Lie derivatives; the BSSN time-evolution equations in their final form
1. [Step 2](#gaugeconditions) ([**full tutorial notebook**](Tutorial-BSSN_time_evolution-BSSN_gauge_RHSs.ipynb)): Time-evolution equations for the BSSN gauge quantities $\alpha$ and $\beta^i$
1. [Step 3](#constraintequations) ([**full tutorial notebook**](Tutorial-BSSN_constraints.ipynb)): The BSSN constraint equations
1. [Step 3.a](#hamiltonianconstraint): The Hamiltonian constraint
1. [Step 3.b](#momentumconstraint): The momentum constraint
1. [Step 4](#gammaconstraint) ([**full tutorial notebook**](Tutorial-BSSN-Enforcing_Determinant_gammabar_equals_gammahat_Constraint.ipynb)): The BSSN algebraic constraint $\hat{\gamma}=\bar{\gamma}$
1. [Step 5](#latex_pdf_output) Output this notebook to $\LaTeX$-formatted PDF file
<a id='brownslagrangebssn'></a>
# Step 1: [Brown](https://arxiv.org/abs/0902.3652)'s covariant formulation of BSSN \[Back to [top](#toc)\]
$$\label{brownslagrangebssn}$$
The covariant "Lagrangian" BSSN formulation of [Brown (2009)](https://arxiv.org/abs/0902.3652), which requires
$$
\partial_t \bar{\gamma} = 0,
$$
results in the BSSN equations taking the following form (Eqs. 11 and 12 in [Ruchlin, Etienne, and Baumgarte (2018)](https://arxiv.org/abs/1712.07658)):
\begin{align}
\partial_{\perp} \bar{\gamma}_{i j} {} = {} & \frac{2}{3} \bar{\gamma}_{i j} \left (\alpha \bar{A}_{k}^{k} - \bar{D}_{k} \beta^{k}\right ) - 2 \alpha \bar{A}_{i j} \; , \\
\partial_{\perp} \bar{A}_{i j} {} = {} & -\frac{2}{3} \bar{A}_{i j} \bar{D}_{k} \beta^{k} - 2 \alpha \bar{A}_{i k} {\bar{A}^{k}}_{j} + \alpha \bar{A}_{i j} K \nonumber \\
& + e^{-4 \phi} \left \{-2 \alpha \bar{D}_{i} \bar{D}_{j} \phi + 4 \alpha \bar{D}_{i} \phi \bar{D}_{j} \phi \right . \nonumber \\
& \left . + 4 \bar{D}_{(i} \alpha \bar{D}_{j)} \phi - \bar{D}_{i} \bar{D}_{j} \alpha + \alpha \bar{R}_{i j} \right \}^{\text{TF}} \; , \\
\partial_{\perp} \phi {} = {} & \frac{1}{6} \left (\bar{D}_{k} \beta^{k} - \alpha K \right ) \; , \\
\partial_{\perp} K {} = {} & \frac{1}{3} \alpha K^{2} + \alpha \bar{A}_{i j} \bar{A}^{i j} \nonumber \\
& - e^{-4 \phi} \left (\bar{D}_{i} \bar{D}^{i} \alpha + 2 \bar{D}^{i} \alpha \bar{D}_{i} \phi \right ) \; , \\
\partial_{\perp} \bar{\Lambda}^{i} {} = {} & \bar{\gamma}^{j k} \hat{D}_{j} \hat{D}_{k} \beta^{i} + \frac{2}{3} \Delta^{i} \bar{D}_{j} \beta^{j} + \frac{1}{3} \bar{D}^{i} \bar{D}_{j} \beta^{j} \nonumber \\
& - 2 \bar{A}^{i j} \left (\partial_{j} \alpha - 6 \partial_{j} \phi \right ) + 2 \bar{A}^{j k} \Delta_{j k}^{i} \nonumber \\
& -\frac{4}{3} \alpha \bar{\gamma}^{i j} \partial_{j} K \\
\end{align}
where
* the $\text{TF}$ superscript denotes the trace-free part.
* $\bar{\gamma}_{ij} = \varepsilon_{i j} + \hat{\gamma}_{ij}$, where $\bar{\gamma}_{ij} = e^{-4\phi} \gamma_{ij}$ is the conformal metric, $\gamma_{ij}$ is the physical metric (see below), and $\varepsilon_{i j}$ encodes information about the non-hatted metric.
* $\gamma_{ij}$, $\beta^i$, and $\alpha$ are the physical (as opposed to conformal) spatial 3-metric, shift vector, and lapse, respectively, which may be defined via the 3+1 decomposition line element (in [$G=c=1$ units](https://en.wikipedia.org/wiki/Planck_units)):
$$ds^2 = -\alpha^2 dt^2 + \gamma_{ij}\left(dx^i + \beta^i dt\right)\left(dx^j + \beta^j dt\right).$$
* $\bar{R}_{ij}$ is the conformal Ricci tensor, computed via
\begin{align}
\bar{R}_{i j} {} = {} & - \frac{1}{2} \bar{\gamma}^{k l} \hat{D}_{k} \hat{D}_{l} \bar{\gamma}_{i j} + \bar{\gamma}_{k(i} \hat{D}_{j)} \bar{\Lambda}^{k} + \Delta^{k} \Delta_{(i j) k} \nonumber \\
& + \bar{\gamma}^{k l} \left (2 \Delta_{k(i}^{m} \Delta_{j) m l} + \Delta_{i k}^{m} \Delta_{m j l} \right ) \; .
\end{align}
* $\partial_{\perp} = \partial_t - \mathcal{L}_\beta$; $\mathcal{L}_\beta$ is the [Lie derivative](https://en.wikipedia.org/wiki/Lie_derivative) along the shift vector $\beta^i$.
* $\partial_0 = \partial_t - \beta^i \partial_i$ is an advective time derivative.
* $\hat{D}_j$ is the [covariant derivative](https://en.wikipedia.org/wiki/Covariant_derivative) with respect to the reference metric $\hat{\gamma}_{ij}$.
* $\bar{D}_j$ is the [covariant derivative](https://en.wikipedia.org/wiki/Covariant_derivative) with respect to the barred spatial 3-metric $\bar{\gamma}_{ij}$
* $\Delta^i_{jk}$ is the tensor constructed from the difference of barred and hatted Christoffel symbols:
$$\Delta^i_{jk} = \bar{\Gamma}^i_{jk} - \hat{\Gamma}^i_{jk}$$
* The related quantity $\Delta^i$ is defined $\Delta^i \equiv \bar{\gamma}^{jk} \Delta^i_{jk}$.
* $\bar{A}_{ij}$ is the conformal, trace-free extrinsic curvature:
$$\bar{A}_{ij} = e^{-4\phi} \left(K_{ij} - \frac{1}{3}\gamma_{ij} K\right),$$
where $K$ is the trace of the extrinsic curvature $K_{ij}$.
<a id='fullequations'></a>
## Step 1.a: Numerical implementation of BSSN time-evolution equations \[Back to [top](#toc)\]
$$\label{fullequations}$$
Regarding the numerical implementation of the above equations, first notice that the left-hand sides of the equations include the time derivatives. Numerically, these equations are solved as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem): data are specified at a given time $t$, and the solution at any later time is obtained using the [Method of Lines (MoL)](https://en.wikipedia.org/wiki/Method_of_lines). MoL requires that the equations be written in the form:
$$\partial_t \vec{U} = \vec{f}\left(\vec{U},\partial_i \vec{U}, \partial_i \partial_j \vec{U},...\right),$$
for the vector of "evolved quantities" $\vec{U}$, where the right-hand side vector $\vec{f}$ *does not* contain *explicit* time derivatives of $\vec{U}$.
Thus we must first rewrite the above equations so that *only* partial derivatives of time appear on the left-hand sides of the equations, meaning that the Lie derivative terms must be moved to the right-hand sides of the equations.
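Once in this form, any standard explicit ODE integrator can advance the data one step at a time. As a purely schematic illustration (NRPy+ generates its own optimized C-code MoL time steppers; this Python sketch is not part of that machinery), a classical RK4 step looks like:
```
# Schematic Method-of-Lines step: with the PDEs written as dU/dt = f(U),
# a classical RK4 update advances the evolved data U (e.g., a NumPy array
# holding all gridfunctions) by one timestep dt. Illustrative only.
def rk4_step(f, U, dt):
    k1 = f(U)
    k2 = f(U + 0.5 * dt * k1)
    k3 = f(U + 0.5 * dt * k2)
    k4 = f(U + dt * k3)
    return U + (dt / 6.0) * (k1 + 2.0 * k2 + 2.0 * k3 + k4)
```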
<a id='liederivs'></a>
### Step 1.a.i: Expanding the Lie derivatives; BSSN equations in their final form \[Back to [top](#toc)\]
$$\label{liederivs}$$
In this Step, we provide explicit expressions for the [Lie derivatives](https://en.wikipedia.org/wiki/Lie_derivative) $\mathcal{L}_\beta$ appearing inside the $\partial_\perp = \partial_t - \mathcal{L}_\beta$ operators for $\left\{\bar{\gamma}_{i j},\bar{A}_{i j},\phi, K, \bar{\Lambda}^{i}\right\}$.
In short, the Lie derivative of tensor weight $w$ is given by (from [the wikipedia article on Lie derivatives](https://en.wikipedia.org/wiki/Lie_derivative))
\begin{align}
(\mathcal {L}_X T) ^{a_1 \ldots a_r}{}_{b_1 \ldots b_s} &= X^c(\partial_c T^{a_1 \ldots a_r}{}_{b_1 \ldots b_s}) \\
&\quad - (\partial_c X ^{a_1}) T ^{c a_2 \ldots a_r}{}_{b_1 \ldots b_s} - \ldots - (\partial_c X^{a_r}) T ^{a_1 \ldots a_{r-1}c}{}_{b_1 \ldots b_s} \\
&\quad + (\partial_{b_1} X^c) T ^{a_1 \ldots a_r}{}_{c b_2 \ldots b_s} + \ldots + (\partial_{b_s} X^c) T ^{a_1 \ldots a_r}{}_{b_1 \ldots b_{s-1} c} + w (\partial_{c} X^c) T ^{a_1 \ldots a_r}{}_{b_1 \ldots b_{s}}
\end{align}
Thus to evaluate the Lie derivative, one must first know the tensor density weight $w$ for each tensor. In this formulation of Einstein's equations, **all evolved quantities have density weight $w=0$**, so according to the definition of Lie derivative above,
\begin{align}
\mathcal{L}_\beta \bar{\gamma}_{ij} &= \beta^k \partial_k \bar{\gamma}_{ij} + \partial_i \beta^k \bar{\gamma}_{kj} + \partial_j \beta^k \bar{\gamma}_{ik}, \\
\mathcal{L}_\beta \bar{A}_{ij} &= \beta^k \partial_k \bar{A}_{ij} + \partial_i \beta^k \bar{A}_{kj} + \partial_j \beta^k \bar{A}_{ik}, \\
\mathcal{L}_\beta \phi &= \beta^k \partial_k \phi, \\
\mathcal{L}_\beta K &= \beta^k \partial_k K, \\
\mathcal{L}_\beta \bar{\Lambda}^i &= \beta^k \partial_k \bar{\Lambda}^i - \partial_k \beta^i \bar{\Lambda}^k
\end{align}
With these definitions, the BSSN equations for the un-rescaled evolved variables in the form $\partial_t \vec{U} = f\left(\vec{U},\partial_i \vec{U}, \partial_i \partial_j \vec{U},...\right)$ become
\begin{align}
\partial_t \bar{\gamma}_{i j} {} = {} & \left[\beta^k \partial_k \bar{\gamma}_{ij} + \partial_i \beta^k \bar{\gamma}_{kj} + \partial_j \beta^k \bar{\gamma}_{ik} \right] + \frac{2}{3} \bar{\gamma}_{i j} \left (\alpha \bar{A}_{k}^{k} - \bar{D}_{k} \beta^{k}\right ) - 2 \alpha \bar{A}_{i j} \; , \\
\partial_t \bar{A}_{i j} {} = {} & \left[\beta^k \partial_k \bar{A}_{ij} + \partial_i \beta^k \bar{A}_{kj} + \partial_j \beta^k \bar{A}_{ik} \right] - \frac{2}{3} \bar{A}_{i j} \bar{D}_{k} \beta^{k} - 2 \alpha \bar{A}_{i k} {\bar{A}^{k}}_{j} + \alpha \bar{A}_{i j} K \nonumber \\
& + e^{-4 \phi} \left \{-2 \alpha \bar{D}_{i} \bar{D}_{j} \phi + 4 \alpha \bar{D}_{i} \phi \bar{D}_{j} \phi + 4 \bar{D}_{(i} \alpha \bar{D}_{j)} \phi - \bar{D}_{i} \bar{D}_{j} \alpha + \alpha \bar{R}_{i j} \right \}^{\text{TF}} \; , \\
\partial_t \phi {} = {} & \left[\beta^k \partial_k \phi \right] + \frac{1}{6} \left (\bar{D}_{k} \beta^{k} - \alpha K \right ) \; , \\
\partial_{t} K {} = {} & \left[\beta^k \partial_k K \right] + \frac{1}{3} \alpha K^{2} + \alpha \bar{A}_{i j} \bar{A}^{i j} - e^{-4 \phi} \left (\bar{D}_{i} \bar{D}^{i} \alpha + 2 \bar{D}^{i} \alpha \bar{D}_{i} \phi \right ) \; , \\
\partial_t \bar{\Lambda}^{i} {} = {} & \left[\beta^k \partial_k \bar{\Lambda}^i - \partial_k \beta^i \bar{\Lambda}^k \right] + \bar{\gamma}^{j k} \hat{D}_{j} \hat{D}_{k} \beta^{i} + \frac{2}{3} \Delta^{i} \bar{D}_{j} \beta^{j} + \frac{1}{3} \bar{D}^{i} \bar{D}_{j} \beta^{j} \nonumber \\
& - 2 \bar{A}^{i j} \left (\partial_{j} \alpha - 6 \partial_{j} \phi \right ) + 2 \alpha \bar{A}^{j k} \Delta_{j k}^{i} -\frac{4}{3} \alpha \bar{\gamma}^{i j} \partial_{j} K
\end{align}
where the terms moved from the left-hand sides to the right-hand sides are enclosed in square braces.
Notice that the shift advection operator $\beta^k \partial_k \left\{\bar{\gamma}_{i j},\bar{A}_{i j},\phi, K, \bar{\Lambda}^{i}, \alpha, \beta^i, B^i\right\}$ appears on the right-hand side of *every* expression. As the shift determines how the spatial coordinates $x^i$ move on the next 3D slice of our 4D manifold, we find that representing $\partial_k$ in these shift advection terms via an *upwinded* finite difference stencil results in far lower numerical errors. This trick is implemented below in all shift advection terms.
As discussed in the [NRPy+ tutorial notebook on BSSN quantities](Tutorial-BSSN_quantities.ipynb), tensorial expressions can diverge at coordinate singularities, so each tensor in the set of BSSN variables
$$\left\{\bar{\gamma}_{i j},\bar{A}_{i j},\phi, K, \bar{\Lambda}^{i}, \alpha, \beta^i, B^i\right\},$$
is written in terms of the corresponding rescaled quantity in the set
$$\left\{h_{i j},a_{i j},\text{cf}, K, \lambda^{i}, \alpha, \mathcal{V}^i, \mathcal{B}^i\right\},$$
respectively, as defined in the [BSSN quantities tutorial](Tutorial-BSSN_quantities.ipynb).
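To make the index pattern concrete, here is a small SymPy sketch (illustrative only; this is not the NRPy+ implementation, and the symbol names used below are invented for the example) of $\mathcal{L}_\beta \bar{\gamma}_{ij} = \beta^k \partial_k \bar{\gamma}_{ij} + \partial_i \beta^k \bar{\gamma}_{kj} + \partial_j \beta^k \bar{\gamma}_{ik}$ in three dimensions:
```
# Illustrative SymPy construction of the Lie derivative of the conformal 3-metric
# along the shift: L_beta gammabar_{ij}. Not NRPy+ code; names are placeholders.
import sympy as sp

xx = sp.symbols('x0 x1 x2')
gammabar = [[sp.Function(f'gammabar{i}{j}')(*xx) for j in range(3)] for i in range(3)]
beta = [sp.Function(f'beta{i}')(*xx) for i in range(3)]

def lie_deriv_gammabar(i, j):
    term = sum(beta[k] * sp.diff(gammabar[i][j], xx[k]) for k in range(3))   # beta^k d_k gammabar_ij
    term += sum(sp.diff(beta[k], xx[i]) * gammabar[k][j] for k in range(3))  # d_i beta^k gammabar_kj
    term += sum(sp.diff(beta[k], xx[j]) * gammabar[i][k] for k in range(3))  # d_j beta^k gammabar_ik
    return term

print(lie_deriv_gammabar(0, 1))
```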
<a id='gaugeconditions'></a>
# Step 2: Time-evolution equations for the BSSN gauge quantities $\alpha$ and $\beta^i$ \[Back to [top](#toc)\]
$$\label{gaugeconditions}$$
As described in the **Background Reading/Lectures** linked to above, the gauge quantities $\alpha$ and $\beta^i$ specify how coordinate time and spatial points adjust from one spatial hypersurface to the next, in our 3+1 decomposition of Einstein's equations.
As choosing $\alpha$ and $\beta^i$ is equivalent to choosing coordinates for where we sample our solution to Einstein's equations, we are completely free to choose $\alpha$ and $\beta^i$ on any given spatial hypersurface. It has been found that fixing $\alpha$ and $\beta^i$ to constant values in the context of dynamical spacetimes results in instabilities, so we generally need to define expressions for $\partial_t \alpha$ and $\partial_t \beta^i$ and couple these equations to the rest of the BSSN time-evolution equations.
Though we are free to choose the form of the right-hand sides of the gauge time evolution equations, very few have been found robust in the presence of (puncture) black holes.
The most commonly adopted gauge conditions for BSSN (i.e., time-evolution equations for the BSSN gauge quantities $\alpha$ and $\beta^i$) are the
* $1+\log$ lapse condition:
$$
\partial_0 \alpha = -2 \alpha K
$$
* Second-order Gamma-driving shift condition:
\begin{align}
\partial_0 \beta^i &= B^{i} \\
\partial_0 B^i &= \frac{3}{4} \partial_{0} \bar{\Lambda}^{i} - \eta B^{i},
\end{align}
where $\partial_0$ is the advection operator; i.e., $\partial_0 A^i = \partial_t A^i - \beta^j \partial_j A^i$. Note that $\partial_{0} \bar{\Lambda}^{i}$ in the right-hand side of the $\partial_{0} B^{i}$ equation is computed by adding $\beta^j \partial_j \bar{\Lambda}^i$ to the right-hand side expression given for $\partial_t \bar{\Lambda}^i$, so no explicit time dependence occurs in the right-hand sides of the BSSN evolution equations and the Method of Lines can be applied directly.
While it is incredibly robust in Cartesian coordinates, [Brown](https://arxiv.org/abs/0902.3652) pointed out that the above time-evolution equation for the shift is not covariant. In fact, we have found this non-covariant version to result in very poor results when solving Einstein's equations in spherical coordinates for a spinning black hole with spin axis pointed in the $\hat{x}$ direction. Therefore we adopt Brown's covariant version as described in the [**full time-evolution equations for the BSSN gauge quantities $\alpha$ and $\beta^i$ tutorial notebook**](Tutorial-BSSN_time_evolution-BSSN_gauge_RHSs.ipynb).
<a id='constraintequations'></a>
# Step 3: The BSSN constraint equations \[Back to [top](#toc)\]
$$\label{constraintequations}$$
In a way analogous to Maxwell's equations, the BSSN decomposition of Einstein's equations are written as a set of time-evolution equations and a set of constraint equations. In this step we present the BSSN constraints
\begin{align}
\mathcal{H} &= 0 \\
\mathcal{M^i} &= 0,
\end{align}
where $\mathcal{H}=0$ is the **Hamiltonian constraint**, and $\mathcal{M^i} = 0$ is the **momentum constraint**. When constructing our spacetime from the initial data, one spatial hypersurface at a time, we monitor these constraints to confirm that, at any given time, the Hamiltonian and momentum constraint violations converge to zero as expected with increased numerical resolution.
<a id='hamiltonianconstraint'></a>
## Step 3.a: The Hamiltonian constraint $\mathcal{H}$ \[Back to [top](#toc)\]
$$\label{hamiltonianconstraint}$$
The Hamiltonian constraint is written (Eq. 13 of [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf)):
$$
\mathcal{H} = \frac{2}{3} K^2 - \bar{A}_{ij} \bar{A}^{ij} + e^{-4\phi} \left(\bar{R} - 8 \bar{D}^i \phi \bar{D}_i \phi - 8 \bar{D}^2 \phi\right)
$$
<a id='momentumconstraint'></a>
## Step 3.b: The momentum constraint $\mathcal{M}^i$ \[Back to [top](#toc)\]
$$\label{momentumconstraint}$$
The momentum constraint is written (Eq. 47 of [Ruchlin, Etienne, & Baumgarte](https://arxiv.org/pdf/1712.07658.pdf)):
$$ \mathcal{M}^i = e^{-4\phi} \left(
\frac{1}{\sqrt{\bar{\gamma}}} \hat{D}_j\left(\sqrt{\bar{\gamma}}\bar{A}^{ij}\right) +
6 \bar{A}^{ij}\partial_j \phi -
\frac{2}{3} \bar{\gamma}^{ij}\partial_j K +
\bar{A}^{jk} \Delta\Gamma^i_{jk} + \bar{A}^{ik} \Delta\Gamma^j_{jk}\right)
$$
Notice the momentum constraint as written in [Baumgarte *et al.*](https://arxiv.org/pdf/1211.6632.pdf) is missing a term, as described in [Ruchlin, Etienne, & Baumgarte](https://arxiv.org/pdf/1712.07658.pdf).
<a id='gammaconstraint'></a>
# Step 4: The BSSN algebraic constraint: $\hat{\gamma}=\bar{\gamma}$ \[Back to [top](#toc)\]
$$\label{gammaconstraint}$$
[Brown](https://arxiv.org/abs/0902.3652)'s covariant Lagrangian formulation of BSSN, which we adopt, requires that $\partial_t \bar{\gamma} = 0$, where $\bar{\gamma}=\det \bar{\gamma}_{ij}$. We generally choose to set $\bar{\gamma}=\hat{\gamma}$ in our initial data.
Numerical errors will cause $\bar{\gamma}$ to deviate from a constant in time. This actually disrupts the hyperbolicity of the PDEs (causing crashes), so to cure this, we adjust $\bar{\gamma}_{ij}$ at the end of each Runge-Kutta timestep, so that its determinant satisfies $\bar{\gamma}=\hat{\gamma}$ at all times. We adopt the following, rather standard prescription (Eq. 53 of [Ruchlin, Etienne, and Baumgarte (2018)](https://arxiv.org/abs/1712.07658)):
$$
\bar{\gamma}_{ij} \to \left(\frac{\hat{\gamma}}{\bar{\gamma}}\right)^{1/3} \bar{\gamma}_{ij}.
$$
Notice the expression on the right is guaranteed to have determinant equal to $\hat{\gamma}$.
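As a quick numerical sanity check (illustrative only; not part of NRPy+), note that for a $3\times 3$ matrix $\det(c\,\bar{\gamma}_{ij}) = c^3\det\bar{\gamma}_{ij}$, so the rescaling above indeed returns a metric with determinant $\hat{\gamma}$:
```
# Numerical check that gammabar_ij -> (gammahat/gammabar)^(1/3) gammabar_ij
# yields det = gammahat for a 3x3 metric. Purely illustrative.
import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(3, 3))
gammabar_ij = A @ A.T + 3.0 * np.eye(3)   # a generic symmetric positive-definite 3-metric
gammahat = 2.5                            # stand-in value for det of the reference metric

rescaled = (gammahat / np.linalg.det(gammabar_ij)) ** (1.0 / 3.0) * gammabar_ij
print(np.isclose(np.linalg.det(rescaled), gammahat))   # True
```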
<a id='latex_pdf_output'></a>
# Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-BSSN_formulation.pdf](Tutorial-BSSN_formulation.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-BSSN_formulation")
```
# CarND-MPC
In this project I try to navigate the lake track yet another time, but this time I'll be using MPC to guide the vehicle. MPC deals with the problem of following a trajectory by optimizing the actuator values to achieve the optimal trajectory, which is different from other controllers such as the PID. MPC will control the steering angle δ as well as the acceleration, which is controlled by the throttle.
[](https://youtu.be/EnNwPT0BTLc)
## The Model
MPC optimizes the error over a trajectory of N steps, each of duration dt. I used a simple kinematic bicycle model which accounts for the vehicle heading, but it is not a complicated model that accounts for tire friction and other variables. Here is the formula of the model:
<img src="./imgs/model_screenshot.png" alt="model" width="500"/>
It uses the vehicle position `x` and `y`, the heading `Ψ`, the velocity `v`, the Cross Track Error `cte`, and the heading error `eΨ`. This model is implemented at line `133` of [`MPC.cpp`](./src/MPC.cpp).
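For reference, a rough Python sketch of one update step of this model is shown below (illustrative only: the real implementation is the C++/CppAD code in `MPC.cpp`; `Lf = 2.67` is assumed to be the value from the Udacity starter code, and the sign convention on δ may differ in the simulator):
```
# Illustrative kinematic-bicycle update over one interval dt (not the project's C++ code).
import math

Lf = 2.67  # assumed distance from the front axle to the center of gravity

def kinematic_step(x, y, psi, v, cte, epsi, delta, a, dt, f, f_prime):
    """f is the fitted reference polynomial, f_prime its derivative."""
    psi_des   = math.atan(f_prime(x))             # desired heading from the reference path
    x_next    = x + v * math.cos(psi) * dt
    y_next    = y + v * math.sin(psi) * dt
    psi_next  = psi + v / Lf * delta * dt
    v_next    = v + a * dt
    cte_next  = (f(x) - y) + v * math.sin(epsi) * dt
    epsi_next = (psi - psi_des) + v / Lf * delta * dt
    return x_next, y_next, psi_next, v_next, cte_next, epsi_next
```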
## Timestep Length and Elapsed Duration (N & dt)
N and dt together define the duration of the predicted trajectory. I started where the lecture suggested, with **N** at **25** and **dt** at **0.05**. I kept decreasing N, because while a high N gives a smoother result it is computationally expensive, so I wanted the lowest value that could still give the optimal result, which turned out to be **12**. I left dt at **0.05**, which produced really good results and is convenient for the latency calculation later.
## Preprocessing and Polynomial Fitting
### Preprocessing
Here I mainly transform all waypoints to vehicle coordinate space using the `toCarCoordinates()` method at line `71` of [`main.cpp`](./src/main.cpp), because all of our work is done from the vehicle's perspective. First I translate the points so the vehicle sits at the origin, and then I rotate them so the x axis is aligned with the heading of the car.
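A minimal Python sketch of that transformation (names are illustrative; the project's actual `toCarCoordinates()` is C++):
```
# Illustrative waypoint transform into the vehicle frame: translate so the car is at the
# origin, then rotate by -psi so the x axis points along the car's heading.
import math

def to_car_coordinates(ptsx, ptsy, px, py, psi):
    xs, ys = [], []
    for gx, gy in zip(ptsx, ptsy):
        dx, dy = gx - px, gy - py
        xs.append(dx * math.cos(-psi) - dy * math.sin(-psi))
        ys.append(dx * math.sin(-psi) + dy * math.cos(-psi))
    return xs, ys
```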
### Polynomial Fitting
I fit the newly obtained vehicle-space waypoints to a 3rd-order polynomial using the `polyfit()` method at line `50` of [`main.cpp`](./src/main.cpp). I used a 3rd-order polynomial rather than the 1st-order one used in the lectures to get a better, curvier fit instead of a linear one, since the reference trajectory is not linear.
## Latency
The project adds a **100 millisecond** delay before the actuator values take effect, which counts as **2** steps for the chosen **dt** (`2 * 0.05 = 0.1` seconds). We need to account for this delay because, if we use the model as it is, we'll always be **100 ms** (2 time steps) late, which could have catastrophic consequences in the real world and causes crashes in the simulator at high speeds. So I kept track of the previous actuator values for the duration of the latency, which in turn includes the delay in the optimization and negates its effect. I had to increase most of the indices by 2 to account for the latency.
I added the following variables to [`MPC.h`](./src/MPC.h):
```
double prev_a{0};
double prev_delta{0.1};
const int latency_timestep = 2;
```
and used them to constrain the actuator values to the previous values to account for latency, as you can see at lines `204` to `216` of [`MPC.cpp`](./src/MPC.cpp).
## Tuning
Here I added a penalty multiplier, as the Tuning MPC part of the MPC lesson suggested. I tried multiple values `[100, 500, 600, 700]` and observed the resulting steering values, as you can see in the figures below.
### 100

Here the car crashed after **146** steps, so I knew I had to do better.
### 500

Here it is a far cry from the previous value, and the car drives really well around the track without crashing. However, the lesson said that by increasing the value we can get a smoother result, so I kept going.
### 600

And here is the result for the value of **600**: smoother than **500**, but not by much, so let's go higher.
### 700

This one is a bit better than **600** and two bits better than **500**, and the car drives even more smoothly, so I'm going to stop here and hopefully test higher numbers later.
### Conclusion
They all look alike, don't they? Well, all except for 100, which crashed. However, if we look closer we can see that by increasing the value we get a smoother transition, as you can see here:
| 500 | 600 | 700 |
|:---:|:---:|:---:|
|  |  |  |
So that's why I stuck with **700** as the correction or smoothing value. I noticed that every time we increase the value, the peak steering values go a little higher; so while increasing the value is good for smoothing, it elongates the peaks in order to get a smoother turn, which could become a problem with higher values.
## Demo: BasicActivity
The basic steps to set up an OpenCLSim simulation are:
* Import libraries
* Initialise simpy environment
* Define object classes
* Create objects
* Create sites
* Create vessels
* Create activities
* Register processes and run simpy
----
This notebook shows a very simple OpenCLSim example. Basically we use OpenCLSim to yield a timeout only. Some basic functionality of the logging is shown.
#### 0. Import libraries
```
import datetime, time
import simpy
import pandas as pd
import openclsim.core as core
import openclsim.model as model
import openclsim.plot as plot
```
#### 1. Initialise simpy environment
```
# setup environment
simulation_start = 0
my_env = simpy.Environment(initial_time=simulation_start)
```
#### 2. Define object classes
In this simple example we won't define specific classes. We will only use a built in activity from OpenCLSim.
#### 3. Create objects
##### 3.1. Create site object(s)
No site objects are created.
##### 3.2. Create vessel object(s)
No vessel objects are created.
##### 3.3 Create activity/activities
```
# initialise registry
registry = {}
# create a basic activity (it just creates an event that shifts time for 'duration')
activity = model.BasicActivity(
env=my_env,
name="Basic activity",
registry=registry,
duration=42,
)
```
#### 4. Register processes and run simpy
```
# initate the simpy processes defined in the 'basic activity' and run simpy
model.register_processes([activity])
my_env.run()
```
#### 5. Inspect results
##### 5.1 Inspect logs
The method `plot.get_log_dataframe` returns the log of an activity in the form of a dataframe. By passing a list of activities as the second argument, the activity references in the log are made more human readable.
```
plot.get_log_dataframe(activity)
plot.get_log_dataframe(activity, [activity])
```
#### 6. Additional logging
The scenario can be extended by adding additional logging instances; that is, the information of the basic activity will also be logged in the activity log of the additional activities. This is useful for more complex nested activities. In this example, a reporting activity is added, and the basic activity also writes its log to it.
```
# initiate SimPy environment
simulation_start = 0
my_env = simpy.Environment(initial_time=simulation_start)
# create activities
registry = {}
reporting_activity = model.BasicActivity(
env=my_env,
name="Reporting activity",
registry=registry,
duration=0,
)
basic_activity = model.BasicActivity(
env=my_env,
name="Basic activity",
registry=registry,
duration=42,
additional_logs=[reporting_activity],
)
# initate the simpy processes defined in the 'while activity' and run simpy
model.register_processes([basic_activity])
my_env.run()
```
##### 6.1 Inspect results
We now show the dataframe of the reporting_activity log, and use the basic_activity to make it more human readable.
```
plot.get_log_dataframe(reporting_activity, [basic_activity])
```
# Computer Architecture Final Project: "Building a Compiler"
This project is based on an implementation of projects #10 and #11 of the "Nand2Tetris" course, which consists of implementing a compiler for a high-level, object-oriented language created by the course's developers, known as the Jack language.
The part implemented here is the compiler front end; that is, it takes a "program.jack" file as input and returns a "program.vm" file that can be executed by the VM Reader provided with the course software.
### Team members:
* Geyner Felipe Rojas Torres
* Isabel Cristina Gómez Balvin
* Iván Daniel Maestre Muza
* Iván David Ortiz Pineda
## Part 1: Defining a Token
```
class Token:
def __init__(self, valor, tipo):
self.valor = valor
if tipo in ["keyword", "symbol", "identifier", "integerConstant", "stringConstant"]:
self.tipo = tipo
else:
raise NameError(f"ERROR: No se puede ingresar un token con tipo {tipo}")
def __str__(self):
        # Escape the XML special symbols (<, > and &) as entities so the output is valid XML
        if self.valor == "<":
            return f"<{self.tipo}> &lt; </{self.tipo}>\n"
        elif self.valor == ">":
            return f"<{self.tipo}> &gt; </{self.tipo}>\n"
        elif self.valor == "&":
            return f"<{self.tipo}> &amp; </{self.tipo}>\n"
else:
return f"<{self.tipo}> {self.valor} </{self.tipo}>\n"
def clase(self):
return "Token"
tok = Token(",", "symbol")
print(Token(">", "symbol"))
print(Token(";", "symbol"))
```
## Part 2: Creating the Tokenizer
```
class Tokenizer:
def __init__(self, ruta):
self.cargar(ruta)
def __str__(self):
texto = ""
for token in self._tokens:
texto = texto + str(token)
return texto
def cargar(self, ruta):
self._varciar()
self._ruta = ruta
archivo = open(ruta, 'r')
for linea in archivo.readlines():
linea = linea.strip()
if (linea != '') and (linea[0:2] != "//"):
if linea[0:3] == "/**":
pass
elif linea[0] == "*":
if linea.find("*/"):
pass
elif linea.find('//') != -1:
self._programa = self._programa + linea[:linea.find('//')].strip()
else:
self._programa = self._programa + linea.strip()
archivo.close()
print("Archivo cargado completamente.")
def separar(self):
sep_comillas = self._programa.split('"')
#palabras = [''.join([f" {c} " if self.symbol(c) else c for c in val]).split() if ind%2 == 0 else [f'"{val}"'] for ind, val in enumerate(sep_comillas)]
for ind, var in enumerate(sep_comillas):
if ind%2 == 0:
espaciado = []
for c in var:
if self.symbol(c):
espaciado.append(f" {c} ")
else:
espaciado.append(c)
self._clasificar(''.join(espaciado).split())
else:
self._clasificar([f'"{var}"'])
def _clasificar(self, lista):
for token in lista:
if self.keyword(token):
self._tokens.append(Token(token, "keyword"))
elif self.symbol(token):
self._tokens.append(Token(token, "symbol"))
elif '"' in token:
self._tokens.append(Token(token[1:-1], "stringConstant"))
elif token.isdigit():
self._tokens.append(Token(token, "integerConstant"))
elif not token[0].isdigit():
self._tokens.append(Token(token, "identifier"))
else:
raise NameError(f"ERROR: Declaración de la variable {token} no fue realizada correctamente.")
def escribir(self):
archivo = open(self._ruta.replace(".jack", "T_.xml"), 'w')
archivo.write(str(self))
archivo.close()
self._varciar()
def _varciar(self):
self._programa = ""
self._tokens = []
def get_programa(self):
return self._programa
def get_lista_tokens(self):
return self._tokens
def keyword(self, texto):
return texto in ["class", "constructor", "function", "method", "field", "static", "var", "int", "char", "boolean", "void", "true", "false", "null", "this", "let", "do", "if", "else", "while", "return"]
def symbol(self, texto):
return texto in ["{", "}", "(", ")", "[", "]", ".", ",", ";", "+", "-", "*", "/", "&", "|", "<", ">", "=", "~"]
tok = Tokenizer(r'C:\Users\idop7\Desktop\nand2tetris\projects\10\ArrayTest\Main.jack')
#print(tok)
tok.separar()
print(tok)      # print the tokens before escribir(), which writes the XML file and clears the internal state
tok.escribir()
```
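As a quick illustration of the tokenizer's output format, the snippet below (the file name `mini.jack` and its one-line contents are just an example) tokenizes a single Jack statement and prints the resulting XML token stream:
```
# Small illustrative example: write a one-statement Jack file and tokenize it.
with open('mini.jack', 'w') as f:
    f.write('let x = a + 1;\n')

mini = Tokenizer('mini.jack')
mini.separar()
print(mini)
# Expected output, one line per token:
# <keyword> let </keyword>
# <identifier> x </identifier>
# <symbol> = </symbol>
# <identifier> a </identifier>
# <symbol> + </symbol>
# <integerConstant> 1 </integerConstant>
# <symbol> ; </symbol>
```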
## Part 3: Creating the Parser
```
class Gramatica:
def __init__(self,nombre):
self.valor = nombre
self._reglas = []
def __str__(self):
texto = f"<{self.valor}>\n"
for regla in self._reglas:
if regla.clase() == "Gramatica":
txt = str(regla).split('\n')[:-1]
txt = ["\t"+t+"\n" for t in txt]
texto = texto + ''.join(txt)
else:
texto = texto + '\t' + str(regla)
return texto + f"</{self.valor}>\n"
def sum(self, regla):
self._reglas.append(regla)
def get_nombre(self):
        return self.valor  # the grammar rule's name is stored in self.valor
def get_reglas(self):
return self._reglas
def get_sub(self):
return self._reglas
def clase(self):
return "Gramatica"
gram = Gramatica('clase')
gram.sum(Token('class', 'keyword'))
gram.sum(Token('main', 'identifier'))
gram2 = Gramatica('registro')
gram2.sum(gram)
gram2.sum(Token(';', 'symbol'))
print(gram2)
class Analizer:
def __init__(self,ruta):
self._ruta = ruta
tokenizer = Tokenizer(ruta)
tokenizer.separar()
self._tokens = tokenizer.get_lista_tokens()
self._pos = 0
self._gramaticas = None
def __str__(self):
return str(self._gramaticas)
def analizar(self):
self._gramaticas = self.compila_clase()
if not self._gramaticas:
raise NameError(f"ERROR: Hay un error de sintaxis. Por favor revise su código")
def escribir(self):
archivo = open(self._ruta.replace(".jack", "_.xml"), 'w')
archivo.write(str(self))
archivo.close()
def compila_clase(self):
ini = self._pos
gram = Gramatica("class")
token = self._sig()
if token.valor == "class":
gram.sum(token)
token = self._sig()
if token.tipo == "identifier":
gram.sum(token)
token = self._sig()
if token.valor == "{":
gram.sum(token)
varc = self.compila_variables_clase()
while varc:
gram.sum(varc)
varc = self.compila_variables_clase()
dsr = self.compila_declaracion_subrutina()
while dsr:
gram.sum(dsr)
dsr = self.compila_declaracion_subrutina()
token = self._sig()
if token.valor == "}":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_variables_clase(self):
ini = self._pos
gram = Gramatica("classVarDec")
token = self._sig()
if token.valor in ['static', 'field']:
gram.sum(token)
token = self._sig()
if self.tipo(token):
gram.sum(token)
token = self._sig()
if token.tipo == "identifier":
gram.sum(token)
token = self._sig()
while token.valor == ",":
tok_aux = self._sig()
if tok_aux.tipo == "identifier":
gram.sum(token)
gram.sum(tok_aux)
token = self._sig()
else:
self._dev()
self._dev()
return None
if token.valor == ";":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_declaracion_subrutina(self):
ini = self._pos
gram = Gramatica("subroutineDec")
token = self._sig()
if token.valor in ['constructor', 'function', 'method']:
gram.sum(token)
token = self._sig()
if self.tipo(token) or token.valor == "void":
gram.sum(token)
token = self._sig()
if token.tipo == "identifier":
gram.sum(token)
token = self._sig()
if token.valor == "(":
gram.sum(token)
gram.sum(self.compila_parametros())
token = self._sig()
if token.valor == ")":
gram.sum(token)
csr = self.compila_cuerpo_subrutina()
if csr:
gram.sum(csr)
return gram
self._repos(ini)
return None;
def compila_parametros(self):
ini = self._pos
gram = Gramatica("parameterList")
token1 = self._sig()
token2 = self._sig()
if self.parametro(token1, token2):
gram.sum(token1)
gram.sum(token2)
token = self._sig()
while token.valor == ",":
token1 = self._sig()
token2 = self._sig()
if self.parametro(token1, token2):
gram.sum(token)
gram.sum(token1)
gram.sum(token2)
token = self._sig()
else:
self._dev()
self._dev()
self._dev()
return gram
self._dev()
return gram
self._repos(ini)
return gram
def compila_cuerpo_subrutina(self):
ini = self._pos
gram = Gramatica("subroutineBody")
token = self._sig()
if token.valor == "{":
gram.sum(token)
var = self.compila_variable()
while var:
gram.sum(var)
var = self.compila_variable()
gram.sum(self.compila_declaraciones())
token = self._sig()
if token.valor == "}":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_variable(self):
ini = self._pos
gram = Gramatica("varDec")
token = self._sig()
if token.valor == "var":
gram.sum(token)
token = self._sig()
if self.tipo(token):
gram.sum(token)
token = self._sig()
if token.tipo == "identifier":
gram.sum(token)
token = self._sig()
while token.valor == ",":
tok_aux = self._sig()
if tok_aux.tipo == "identifier":
gram.sum(token)
gram.sum(tok_aux)
token = self._sig()
else:
self._dev()
self._dev()
return None
if token.valor == ";":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_declaraciones(self):
ini = self._pos
gram = Gramatica("statements")
dec = self.compila_declaracion()
while dec:
gram.sum(dec)
dec = self.compila_declaracion()
return gram
def compila_declaracion(self):
token = self._sig()
self._dev()
if token.valor == "let":
return self.compila_let()
elif token.valor == "if":
return self.compila_if()
elif token.valor == "while":
return self.compila_while()
elif token.valor == "do":
return self.compila_do()
elif token.valor == "return":
return self.compila_return()
else:
return None
def compila_let(self):
ini = self._pos
gram = Gramatica("letStatement")
token = self._sig()
if token.valor == "let":
gram.sum(token)
token = self._sig()
if token.tipo == "identifier": # nombre de variable
gram.sum(token)
token = self._sig()
if token.valor == "=":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == ";":
gram.sum(token)
return gram
elif token.valor == "[":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == "]":
gram.sum(token)
token = self._sig()
if token.valor == "=":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == ";":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_if(self):
ini = self._pos
gram = Gramatica("ifStatement")
token = self._sig()
if token.valor == "if":
gram.sum(token)
token = self._sig()
if token.valor == "(":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == ")":
gram.sum(token)
token = self._sig()
if token.valor == "{":
gram.sum(token)
stm = self.compila_declaraciones()
if stm:
gram.sum(stm)
token = self._sig()
if token.valor == "}":
gram.sum(token)
token = self._sig()
if token.valor != "else":
self._dev()
return gram
else:
gram.sum(token)
token = self._sig()
if token.valor == "{":
gram.sum(token)
stm = self.compila_declaraciones()
if stm:
gram.sum(stm)
token = self._sig()
if token.valor == "}":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_while(self):
ini = self._pos
gram = Gramatica("whileStatement")
token = self._sig()
if token.valor == "while":
gram.sum(token)
token = self._sig()
if token.valor == "(":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == ")":
gram.sum(token)
token = self._sig()
if token.valor == "{":
gram.sum(token)
stm = self.compila_declaraciones()
if stm:
gram.sum(stm)
token = self._sig()
if token.valor == "}":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_do(self):
ini = self._pos
gram = Gramatica("doStatement")
token = self._sig()
if token.valor == "do":
gram.sum(token)
token = self._sig()
if token.tipo == "identifier":
gram.sum(token)
token = self._sig()
if token.valor == "(":
gram.sum(token)
exl = self.compila_lista_expresion()
if exl:
gram.sum(exl)
token = self._sig()
if token.valor == ")":
gram.sum(token)
token = self._sig()
if token.valor == ";":
gram.sum(token)
return gram
elif token.valor == ".":
gram.sum(token)
token = self._sig()
if token.tipo == "identifier": # nombre subrutina
gram.sum(token)
token = self._sig()
if token.valor == "(":
gram.sum(token)
exl = self.compila_lista_expresion()
if exl:
gram.sum(exl)
token = self._sig()
if token.valor == ")":
gram.sum(token)
token = self._sig()
if token.valor == ";":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_return(self):
ini = self._pos
gram = Gramatica("returnStatement")
token = self._sig()
if token.valor == "return":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == ";":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_lista_expresion(self):
ini = self._pos
gram = Gramatica("expressionList")
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
while token.valor == ",":
exp = self.compila_expresion()
if exp:
gram.sum(token)
gram.sum(exp)
token = self._sig()
else:
self._dev()
return gram
self._dev()
return gram
return gram
def compila_expresion(self):
ini = self._pos
gram = Gramatica("expression")
ter = self.compila_termino()
if ter:
gram.sum(ter)
token = self._sig()
while self.op(token):
ter = self.compila_termino()
if ter:
gram.sum(token)
gram.sum(ter)
token = self._sig()
else:
self._dev()
return gram
self._dev()
return gram
self._repos(ini)
return None
def compila_termino(self):
ini = self._pos
gram = Gramatica("term")
token = self._sig()
if token.tipo == "integerConstant":
gram.sum(token)
return gram
elif token.tipo == "stringConstant":
gram.sum(token)
return gram
elif self.keywordConstant(token):
gram.sum(token)
return gram
elif token.valor == "(":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == ")":
gram.sum(token)
return gram
elif self.unaryOp(token):
gram.sum(token)
ter = self.compila_termino()
if ter:
gram.sum(ter)
return gram
elif token.tipo == "identifier": #nombre Subrutina, clase, variable, variable(arreglo)
gram.sum(token)
token = self._sig()
if token.valor == "[":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == "]":
gram.sum(token)
return gram
elif token.valor == "(":
gram.sum(token)
exl = self.compila_lista_expresion()
if exl:
gram.sum(exl)
token = self._sig()
if token.valor == ")":
gram.sum(token)
return gram
elif token.valor == ".":
gram.sum(token)
token = self._sig()
if token.tipo == "identifier": # nombre subrutina
gram.sum(token)
token = self._sig()
if token.valor == "(":
gram.sum(token)
exl = self.compila_lista_expresion()
if exl:
gram.sum(exl)
token = self._sig()
if token.valor == ")":
gram.sum(token)
return gram
else:
self._dev()
return gram
self._repos(ini)
return None
def _sig(self):
self._pos = self._pos + 1
return self._tokens[self._pos-1]
def _dev(self):
self._pos = self._pos - 1
def _repos(self, pos):
self._pos = pos
def keywordConstant(self, token):
return token.valor in ["true", "false", "null", "this"]
def op(self, token):
return token.valor in ["+", "-", "*", "/", "&", "|", "<", ">", "="]
def unaryOp(self, token):
return token.valor in ["-", "~"]
def tipo(self, token):
return token.valor in ['int', 'char', 'boolean'] or token.tipo == "identifier"
def parametro(self, token1, token2):
return self.tipo(token1) and token2.tipo == "identifier"
an = Analizer(r'C:\Users\idop7\Desktop\nand2tetris\projects\10\ArrayTest\Main.jack')
an.analizar()
an.escribir()
print(an)
```
## Part 4: The Symbol Table
```
class TokenVar:
def __init__(self, token, cat, tipo=None, indice=None, rol=None):
self.valor = token.valor
self.tipo = token.tipo
if cat == "field":
self.cat = "this"
else:
self.cat = cat
if cat not in ['class', 'subroutine']:
self.tipo_dato = tipo
self.ind = indice
self.rol = rol
def __str__(self):
if self.cat == "this":
cat = "field"
else:
cat = self.cat
if cat not in ['class', 'subroutine']:
return f"<{self.tipo}> {self.valor} </{self.tipo}> {[cat, self.tipo_dato, self.ind, self.rol]}\n"
else:
return f"<{self.tipo}> {self.valor} </{self.tipo}> {[cat]}\n"
def clase(self):
return "tokenVar"
tok = Token('main', 'identifier')
var = TokenVar(tok, 'variable', 'no', 3, 'def')
var2 = TokenVar(tok, 'class')
print(var)
print(var2)
class SymbolTable:
def __init__(self):
self._tabla = [{"field": [], "static": []}]
def __str__(self):
txt = ''
for dic in self._tabla:
for key, value in dic.items():
for ind, row in enumerate(value):
txt = txt + str(row+[key, ind]) + "\n"
return txt
def sub(self):
self._tabla.append({"argument": [], "local": []})
def borraSub(self):
self._tabla.pop()
def add(self, token, tipo, clase):
if clase in ["field", "static"]:
self._tabla[0][clase].append([token.valor, tipo])
else:
self._tabla[-1][clase].append([token.valor, tipo])
def trae(self, token, clase):
if clase in ["field", "static"]:
tabla = self._tabla[0][clase]
else:
tabla = self._tabla[-1][clase]
for ind, reg in enumerate(tabla):
if reg[0] == token.valor:
return TokenVar(token, clase, reg[1], ind, 'def')
def busca(self, token):
tabla = dict(self._tabla[0], **self._tabla[-1])
for clase, value in tabla.items():
for ind, reg in enumerate(value):
if reg[0] == token.valor:
return TokenVar(token, clase, reg[1], ind, 'use')
# Quick test of the SymbolTable: class-level fields/statics plus two subroutine scopes.
st = SymbolTable()
st.sub()
st.add(Token('x', 'identifier'), 'int', 'field')
st.add(Token('y', 'identifier'), 'int', 'field')
st.add(Token('pointcount', 'identifier'), 'point', 'static')
print(st)
st.sub()
st.add(Token('this', 'keyword'), 'point', 'argument')
st.add(Token('other', 'identifier'), 'point', 'argument')
st.add(Token('dx', 'identifier'), 'int', 'local')
st.add(Token('dy', 'identifier'), 'int', 'local')
print(st)
print(st.busca(Token('dy', 'identifier')))
print(st.busca(Token('other', 'identifier')))
print(st.busca(Token('pointcount', 'identifier')))
print(st.busca(Token('x', 'identifier')))
st.sub()
st.add(Token('a', 'identifier'), 'point', 'argument')
st.add(Token('b', 'identifier'), 'point', 'argument')
st.add(Token('sum', 'identifier'), 'int', 'local')
print(st)
print(st.busca(Token('a', 'identifier')))
print(st.busca(Token('sum', 'identifier')))
print(st.busca(Token('pointcount', 'identifier')))
print(st.busca(Token('x', 'identifier')))
print(st.busca(Token('dx', 'identifier')))  # None: 'dx' lives in the previous subroutine's scope
```
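Looking ahead to the back end (project 11 of the course), each symbol-table category maps onto a VM memory segment (`static` → `static`, `field` → `this`, `argument` → `argument`, `var` → `local`), so a resolved variable can be translated directly into a VM `push`/`pop` command. The helper below is only an illustrative sketch (the function name is invented) built on the `TokenVar` fields defined above:
```
# Illustrative sketch of how a resolved variable would become a VM command
# in the code-generation stage. TokenVar.cat already uses the VM segment names
# (note that 'field' is stored as 'this').
def push_command(var):
    return f"push {var.cat} {var.ind}"

# Example: a variable registered as the first 'local' entry yields "push local 0".
```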
## Part 5: The Complete Analyzer
```
class FullAnalizer:
def __init__(self,ruta):
self._ruta = ruta
tokenizer = Tokenizer(ruta)
tokenizer.separar()
self._tokens = tokenizer.get_lista_tokens()
self._tabla = SymbolTable()
self._pos = 0
self._gramaticas = None
def __str__(self):
return str(self._gramaticas)
def analizar(self):
self._gramaticas = self.compila_clase()
if not self._gramaticas:
raise NameError(f"ERROR: Hay un error de sintaxis. Por favor revise su código")
def escribir(self):
archivo = open(self._ruta.replace(".jack", "ST_.xml"), 'w')
archivo.write(str(self))
archivo.close()
def compila_clase(self):
ini = self._pos
gram = Gramatica("class")
token = self._sig()
if token.valor == "class":
gram.sum(token)
token = self._sig()
if token.tipo == "identifier":
gram.sum(TokenVar(token, 'class')) # Se especifica que es el nombre de una clase
token = self._sig()
if token.valor == "{":
gram.sum(token)
varc = self.compila_variables_clase()
while varc:
gram.sum(varc)
varc = self.compila_variables_clase()
dsr = self.compila_declaracion_subrutina()
while dsr:
gram.sum(dsr)
dsr = self.compila_declaracion_subrutina()
token = self._sig()
if token.valor == "}":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_variables_clase(self):
ini = self._pos
gram = Gramatica("classVarDec")
token = self._sig()
if token.valor in ['static', 'field']:
cat = token.valor;
gram.sum(token)
token = self._sig()
if self.tipo(token):
if token.tipo == "identifier": # Se mira si el tipo declarado es una clase
token = TokenVar(token, 'class')
tipo = token.valor
gram.sum(token)
token = self._sig()
if token.tipo == "identifier":
self._tabla.add(token, tipo, cat) # Se agrega la variable a la tabla
token = self._tabla.trae(token, cat) # Se busca el indice del token en la tabla
gram.sum(token)
token = self._sig()
while token.valor == ",":
tok_aux = self._sig()
if tok_aux.tipo == "identifier":
self._tabla.add(tok_aux, tipo, cat) # Se agrega la variable a la tabla
tok_aux = self._tabla.trae(tok_aux, cat) # Se busca el indice del token en la tabla
gram.sum(token)
gram.sum(tok_aux)
token = self._sig()
else:
self._dev()
self._dev()
return None
if token.valor == ";":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_declaracion_subrutina(self):
ini = self._pos
gram = Gramatica("subroutineDec")
self._tabla.sub() # Se añade una nueva subrutina
token = self._sig()
if token.valor in ['constructor', 'function', 'method']:
gram.sum(token)
if token.valor == 'method':
self._tabla.add(Token('this', 'keyword'), 'pointer', 'argument')
token = self._sig()
if self.tipo(token) or token.valor == "void":
if token.tipo == "identifier": # Se mira si el tipo declarado es una clase
token = TokenVar(token, 'class')
gram.sum(token)
token = self._sig()
if token.tipo == "identifier":
gram.sum(TokenVar(token, 'subroutine'))
token = self._sig()
if token.valor == "(":
gram.sum(token)
gram.sum(self.compila_parametros())
token = self._sig()
if token.valor == ")":
gram.sum(token)
csr = self.compila_cuerpo_subrutina()
if csr:
gram.sum(csr)
return gram
self._repos(ini)
return None;
def compila_parametros(self):
ini = self._pos
gram = Gramatica("parameterList")
token1 = self._sig()
token2 = self._sig()
if self.parametro(token1, token2):
if token1.tipo == "identifier": # Se mira si el tipo declarado es una clase
token1 = TokenVar(token1, 'class')
tipo = token1.valor
self._tabla.add(token2, tipo, "argument") # Se agrega la variable a la tabla
token2 = self._tabla.trae(token2, "argument") # Se busca el indice del token en la tabla
gram.sum(token1)
gram.sum(token2)
token = self._sig()
while token.valor == ",":
token1 = self._sig()
token2 = self._sig()
if self.parametro(token1, token2):
if token1.tipo == "identifier": # Se mira si el tipo declarado es una clase
token1 = TokenVar(token1, 'class')
tipo = token1.valor
self._tabla.add(token2, tipo, "argument") # Se agrega la variable a la tabla
token2 = self._tabla.trae(token2, "argument") # Se busca el indice del token en la tabla
gram.sum(token)
gram.sum(token1)
gram.sum(token2)
token = self._sig()
else:
self._dev()
self._dev()
self._dev()
return gram
self._dev()
return gram
self._repos(ini)
return gram
def compila_cuerpo_subrutina(self):
ini = self._pos
gram = Gramatica("subroutineBody")
token = self._sig()
if token.valor == "{":
gram.sum(token)
var = self.compila_variable()
while var:
gram.sum(var)
var = self.compila_variable()
gram.sum(self.compila_declaraciones())
token = self._sig()
if token.valor == "}":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_variable(self):
ini = self._pos
gram = Gramatica("varDec")
token = self._sig()
if token.valor == "var":
gram.sum(token)
token = self._sig()
if self.tipo(token):
if token.tipo == "identifier": # Se mira si el tipo declarado es una clase
token = TokenVar(token, 'class')
tipo = token.valor
gram.sum(token)
token = self._sig()
if token.tipo == "identifier":
self._tabla.add(token, tipo, 'local') # Se agrega la variable a la tabla
token = self._tabla.trae(token, 'local') # Se busca el indice del token en la tabla
gram.sum(token)
token = self._sig()
while token.valor == ",":
tok_aux = self._sig()
if tok_aux.tipo == "identifier":
self._tabla.add(tok_aux, tipo, 'local') # Se agrega la variable a la tabla
tok_aux = self._tabla.trae(tok_aux, 'local') # Se busca el indice del token en la tabla
gram.sum(token)
gram.sum(tok_aux)
token = self._sig()
else:
self._dev()
self._dev()
return None
if token.valor == ";":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_declaraciones(self):
ini = self._pos
gram = Gramatica("statements")
dec = self.compila_declaracion()
while dec:
gram.sum(dec)
dec = self.compila_declaracion()
return gram
def compila_declaracion(self):
token = self._sig()
self._dev()
if token.valor == "let":
return self.compila_let()
elif token.valor == "if":
return self.compila_if()
elif token.valor == "while":
return self.compila_while()
elif token.valor == "do":
return self.compila_do()
elif token.valor == "return":
return self.compila_return()
else:
return None
def compila_let(self):
ini = self._pos
gram = Gramatica("letStatement")
token = self._sig()
if token.valor == "let":
gram.sum(token)
token = self._sig()
if token.tipo == "identifier":
token = self._tabla.busca(token)
gram.sum(token)
token = self._sig()
if token.valor == "=":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == ";":
gram.sum(token)
return gram
elif token.valor == "[":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == "]":
gram.sum(token)
token = self._sig()
if token.valor == "=":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == ";":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_if(self):
ini = self._pos
gram = Gramatica("ifStatement")
token = self._sig()
if token.valor == "if":
gram.sum(token)
token = self._sig()
if token.valor == "(":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == ")":
gram.sum(token)
token = self._sig()
if token.valor == "{":
gram.sum(token)
stm = self.compila_declaraciones()
if stm:
gram.sum(stm)
token = self._sig()
if token.valor == "}":
gram.sum(token)
token = self._sig()
if token.valor != "else":
self._dev()
return gram
else:
gram.sum(token)
token = self._sig()
if token.valor == "{":
gram.sum(token)
stm = self.compila_declaraciones()
if stm:
gram.sum(stm)
token = self._sig()
if token.valor == "}":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_while(self):
ini = self._pos
gram = Gramatica("whileStatement")
token = self._sig()
if token.valor == "while":
gram.sum(token)
token = self._sig()
if token.valor == "(":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == ")":
gram.sum(token)
token = self._sig()
if token.valor == "{":
gram.sum(token)
stm = self.compila_declaraciones()
if stm:
gram.sum(stm)
token = self._sig()
if token.valor == "}":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_do(self):
ini = self._pos
gram = Gramatica("doStatement")
token = self._sig()
if token.valor == "do":
gram.sum(token)
token = self._sig()
if token.tipo == "identifier":
ti = token
token = self._sig()
if token.valor == "(":
gram.sum(TokenVar(ti, 'subroutine'))
gram.sum(token)
exl = self.compila_lista_expresion()
if exl:
gram.sum(exl)
token = self._sig()
if token.valor == ")":
gram.sum(token)
token = self._sig()
if token.valor == ";":
gram.sum(token)
return gram
elif token.valor == ".":
busqueda = self._tabla.busca(ti) # mira si es el nombre de una variable o una clase
if busqueda:
gram.sum(busqueda)
else:
gram.sum(TokenVar(ti, 'class'))
gram.sum(token)
token = self._sig()
if token.tipo == "identifier":
gram.sum(TokenVar(token, 'subroutine'))
token = self._sig()
if token.valor == "(":
gram.sum(token)
exl = self.compila_lista_expresion()
if exl:
gram.sum(exl)
token = self._sig()
if token.valor == ")":
gram.sum(token)
token = self._sig()
if token.valor == ";":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_return(self):
ini = self._pos
gram = Gramatica("returnStatement")
token = self._sig()
if token.valor == "return":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == ";":
gram.sum(token)
return gram
self._repos(ini)
return None
def compila_lista_expresion(self):
ini = self._pos
gram = Gramatica("expressionList")
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
while token.valor == ",":
exp = self.compila_expresion()
if exp:
gram.sum(token)
gram.sum(exp)
token = self._sig()
else:
self._dev()
return gram
self._dev()
return gram
return gram
def compila_expresion(self):
ini = self._pos
gram = Gramatica("expression")
ter = self.compila_termino()
if ter:
gram.sum(ter)
token = self._sig()
while self.op(token):
ter = self.compila_termino()
if ter:
gram.sum(token)
gram.sum(ter)
token = self._sig()
else:
self._dev()
return gram
self._dev()
return gram
self._repos(ini)
return None
def compila_termino(self):
ini = self._pos
gram = Gramatica("term")
token = self._sig()
if token.tipo == "integerConstant":
gram.sum(token)
return gram
elif token.tipo == "stringConstant":
gram.sum(token)
return gram
elif self.keywordConstant(token):
gram.sum(token)
return gram
elif token.valor == "(":
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == ")":
gram.sum(token)
return gram
elif self.unaryOp(token):
gram.sum(token)
ter = self.compila_termino()
if ter:
gram.sum(ter)
return gram
elif token.tipo == "identifier":
ti = token # Guardo momentariamente el token
token = self._sig()
if token.valor == "[":
gram.sum(self._tabla.busca(ti)) # Busco el token en la tabla porque es una variable
gram.sum(token)
exp = self.compila_expresion()
if exp:
gram.sum(exp)
token = self._sig()
if token.valor == "]":
gram.sum(token)
return gram
elif token.valor == "(":
gram.sum(TokenVar(ti, 'subroutine')) # Añado el token como subrutina
gram.sum(token)
exl = self.compila_lista_expresion()
if exl:
gram.sum(exl)
token = self._sig()
if token.valor == ")":
gram.sum(token)
return gram
elif token.valor == ".":
busqueda = self._tabla.busca(ti) # mira si es el nombre de una variable o una clase
if busqueda:
gram.sum(busqueda)
else:
gram.sum(TokenVar(ti, 'class'))
#gram.sum(TokenVar(ti, 'class')) # Añado el token como clase
gram.sum(token)
token = self._sig()
if token.tipo == "identifier":
gram.sum(TokenVar(token, 'subroutine')) # Añado el token como subrutina
token = self._sig()
if token.valor == "(":
gram.sum(token)
exl = self.compila_lista_expresion()
if exl:
gram.sum(exl)
token = self._sig()
if token.valor == ")":
gram.sum(token)
return gram
else:
gram.sum(self._tabla.busca(ti)) # Busco el token en la tabla porque es una variable
self._dev()
return gram
self._repos(ini)
return None
def get_reglas(self):
return self._gramaticas
def _sig(self):
self._pos = self._pos + 1
return self._tokens[self._pos-1]
def _dev(self):
self._pos = self._pos - 1
def _repos(self, pos):
self._pos = pos
def keywordConstant(self, token):
return token.valor in ["true", "false", "null", "this"]
def op(self, token):
return token.valor in ["+", "-", "*", "/", "&", "|", "<", ">", "="]
def unaryOp(self, token):
return token.valor in ["-", "~"]
def tipo(self, token):
return token.valor in ['int', 'char', 'boolean'] or token.tipo == "identifier"
def parametro(self, token1, token2):
return self.tipo(token1) and token2.tipo == "identifier"
an = FullAnalizer(r'C:\Users\idop7\Desktop\nand2tetris\projects\10\prueba.jack')
an.analizar()
print(an)
an = FullAnalizer(r'C:\Users\idop7\Desktop\nand2tetris\projects\11\Pong\Main.jack')
an.analizar()
print(an)
```
## Part 5: Code Generator
```
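# Code generator: TextGenedator walks the parse tree built by FullAnalizer and
# emits Hack VM instructions (one "function" declaration per subroutine followed
# by the VM code for its statements), accumulating them in the self._vm list.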
class TextGenedator:
    def __init__(self, ruta):
        self._ruta = ruta  # the path is needed later by escribir() to name the output .vm file
        fa = FullAnalizer(ruta)
fa.analizar()
self._gramaticas = fa.get_reglas() # Nodo clase del árbol
self._vm = []
self._clase = ''
self._subrutinas = {}
self.nif = 0
self.nwhile = 0
def __str__(self):
txt = ''
for ins in self._vm:
txt = txt + ins + "\n"
return txt
def generar(self):
sub = self._gramaticas.get_sub() # Nodos hijos del nodo clase:
self.define_metodos(sub)
self._clase = sub[1].valor # Guarda el nombre de la clase
if len(sub)-4 > 0:
for i in range(3, len(sub)-1):
subdec = sub[i].get_sub()
if sub[i].valor == "subroutineDec":
self._vm.append(f"function {self._clase}.{subdec[2].valor} {self._subrutinas[subdec[2].valor][2]}")
#input("Stop")
if subdec[0].valor == "method":
self._vm.append("push argument 0")
self._vm.append("pop pointer 0")
if subdec[0].valor == "constructor":
self._vm.append(f"push constant {self.cuenta_campos(sub)}")
self._vm.append("call Memory.alloc 1")
self._vm.append("pop pointer 0")
body = subdec[-1].get_sub() # Entra a los hijos de subroutineBody
self.escribe_cuerpo(body[1:-1]) # Solo se para como parametro los Statement y Clases
    def escribir(self):
        # Write the generated VM instructions next to the source file, following
        # the "_" naming convention used by the other escribir methods.
        archivo = open(self._ruta.replace(".jack", "_.vm"), 'w')
        archivo.write(str(self))  # str(self) joins the generated VM instructions
        archivo.close()
def define_metodos(self, sub):
if len(sub)-4 > 0:
for i in range(3, len(sub)-1):
subdec = sub[i].get_sub()
if sub[i].valor == "subroutineDec":
subdec = sub[i].get_sub() # Entra a los nodos hijos del primer subroutineDec
nom_subrut = subdec[2].valor
fcm_subrut = subdec[0].valor
tipo_subrut = subdec[1].valor
if fcm_subrut == "method":
param = subdec[4].get_sub() # Enrta a los nodos hijos de parameter list
param = len(param)//3 + 1 + (nom_subrut == "method") # Encuentra el número de parametros a pedir
elif fcm_subrut == "function":
param = self.cuenta_parametros(subdec[-1].get_sub())
else:
param = 0
self._subrutinas[nom_subrut] = [fcm_subrut, tipo_subrut, param]
def cuenta_parametros(self, body):
params = 0
for bdy in body:
if bdy.valor == "varDec":
params = params + len(bdy.get_sub()[1:-1])/2
return int(params)
def cuenta_campos(self, sub):
campos = 0
for s in sub:
if s.valor == "classVarDec":
campos = campos + len(s.get_sub()[1:-1])/2
return int(campos)
def escribe_cuerpo(self, body):
for bdy in body:
if bdy.valor == "statements":
stat = bdy.get_sub()
self.escribe_declaraciones(stat)
def escribe_declaraciones(self, sts):
ni = self.nif
nw = self.nwhile
for dec in sts:
if dec.valor == "letStatement":
self.escribe_let(dec.get_sub())
elif dec.valor == "ifStatement":
self.nif = self.nif + 1
self.escribe_if(dec.get_sub(), ni)
elif dec.valor == "whileStatement":
self.nwhile = self.nwhile + 1
self.escribe_while(dec.get_sub(), nw)
elif dec.valor == "doStatement":
self.escribe_do(dec.get_sub())
elif dec.valor == "returnStatement":
self.escribe_return(dec.get_sub())
def escribe_let(self, let):
if(len(let) == 8):
self.escribe_expresion(let[3].get_sub())
self._vm.append(f"push {let[1].cat} {let[1].ind}")
self._vm.append("add")
self.escribe_expresion(let[6].get_sub())
self._vm.append("pop temp 0")
self._vm.append("pop pointer 1")
self._vm.append("push temp 0")
self._vm.append("pop that 0")
#self._vm.append("-----------------------")
else:
self.escribe_expresion(let[3].get_sub())
self._vm.append(f"pop {let[1].cat} {let[1].ind}")
def escribe_if(self, ifs, n):
self.escribe_expresion(ifs[2].get_sub())
if len(ifs) == 11:
self._vm.append("if-goto IF-TRUE"+str(n))
self._vm.append("goto IF-FALSE"+str(n))
self._vm.append("label IF-TRUE"+str(n))
self.escribe_declaraciones(ifs[5].get_sub())
self._vm.append("goto IF-END"+str(n))
self._vm.append("label IF-FALSE"+str(n))
self.escribe_declaraciones(ifs[9].get_sub())
self._vm.append("label IF-END"+str(n))
else:
self._vm.append("not")
self._vm.append("if-goto IF-END"+str(n))
self.escribe_declaraciones(ifs[5].get_sub())
self._vm.append("label IF-END"+str(n))
def escribe_while(self, whiles, n):
self._vm.append("label WHILE-EXP"+str(n))
self.escribe_expresion(whiles[2].get_sub())
self._vm.append("not")
self._vm.append("if-goto WHILE-END"+str(n))
self.escribe_declaraciones(whiles[5].get_sub())
self._vm.append("goto WHILE-EXP"+str(n))
self._vm.append("label WHILE-END"+str(n))
def escribe_do(self, do):
if do[1].cat == "subroutine":
par = self._subrutinas[do[1].valor][-1]
if self._subrutinas[do[1].valor][0] == "method":
self._vm.append("push pointer 0")
par = par + 1
self.escribe_lista_expresiones(do[3].get_sub())
self._vm.append(f"call {self._clase}.{do[1].valor} {par}")
self._vm.append("pop temp 0")
elif do[1].cat == "class":
self.escribe_lista_expresiones(do[5].get_sub())
self._vm.append(f"call {do[1].valor}.{do[3].valor} {(len(do[5].get_sub())+1)//2}")
self._vm.append("pop temp 0")
else:
self._vm.append(f"push {do[1].cat} {do[1].ind}")
self.escribe_lista_expresiones(do[5].get_sub())
self._vm.append(f"call {do[1].tipo_dato}.{do[3].valor} {(len(do[5].get_sub())+1)//2}")
self._vm.append("pop temp 0")
def escribe_return(self, returns):
if len(returns) == 2:
self._vm.append("push constant 0")
elif returns[1].valor == 'this':
self._vm.append("push pointer 0")
else:
self.escribe_expresion(returns[1].get_sub())
self._vm.append("return")
def escribe_lista_expresiones(self, lista):
if len(lista) > 0:
self.escribe_expresion(lista[0].get_sub())
for ind in range(2, len(lista), 2):
self.escribe_expresion(lista[ind].get_sub())
def escribe_expresion(self, exp):
self.escribe_termino(exp[0].get_sub())
for ind in range(2,len(exp),2):
self.escribe_termino(exp[ind].get_sub())
self._vm.append(self._op(exp[ind-1].valor))
def escribe_termino(self, term):
tok1 = term[0]
if tok1.tipo == "integerConstant":
self._vm.append(f"push constant {tok1.valor}")
elif tok1.tipo == "stringConstant":
self._vm.append(f"push constant {len(tok1.valor)}")
self._vm.append("call String.new 1")
for c in tok1.valor:
self._vm.append(f"push constant {ord(c)}")
self._vm.append(f"call String.appendChar 2")
elif tok1.valor == "true":
self._vm.append("push constant 0")
self._vm.append("not")
elif tok1.valor == "false":
self._vm.append("push constant 0")
elif tok1.valor == "null":
self._vm.append("push constant 0")
elif tok1.valor == "this":
self._vm.append("push pointer 0")
elif tok1.valor == "(":
self.escribe_expresion(term[1].get_sub())
elif tok1.tipo == "symbol":
self.escribe_termino(term[1].get_sub())
self._vm.append(self._un(tok1.valor))
elif tok1.cat == "subroutine":
if self._subrutinas[term[0].valor][0] == "method":
self._vm.append("push pointer 0")
            self.escribe_lista_expresiones(term[2].get_sub())
self._vm.append(f"call {self._clase}.{term[0].valor} {self._subrutinas[term[0].valor][-1]}")
if self._subrutinas[term[0].valor][0] != "method":
self._vm.append("pop temp 0")
elif tok1.cat == "class":
self.escribe_lista_expresiones(term[4].get_sub())
self._vm.append(f"call {term[0].valor}.{term[2].valor} {(len(term[4].get_sub())+1)//2}")
#self._vm.append("pop temp 0")
elif tok1.cat == "pointer" and len(term) == 6:
self._vm.append(f"push {tok1.cat} {tok1.ind}")
self.escribe_lista_expresiones(term[4].get_sub())
self._vm.append(f"call corregir.{term[2].valor} {(len(term[4].get_sub())+1)//2}")
elif len(term) == 4:
self.escribe_expresion(term[2].get_sub())
self._vm.append(f"push {tok1.cat} {tok1.ind}")
self._vm.append("add")
self._vm.append("pop pointer 1")
self._vm.append("push that 0")
else:
self._vm.append(f"push {tok1.cat} {tok1.ind}")
def _op(self, operador):
        oper = {'+': "add", '-': "sub", '*': "call Math.multiply 2", '/': "call Math.divide 2", '&': "and", '|':"or", '<':"lt", '>':"gt", '=':"eq"}
return oper[operador]
def _un(self, operador):
ops = {'-': "neg", '~': "not"}
return ops[operador]
vm = TextGenedator(r"C:\Users\idop7\Desktop\nand2tetris\projects\11\ComplexArrays\Main.jack")
vm.generar()
print(vm)
vm = FullAnalizer(r"C:\Users\idop7\Desktop\nand2tetris\projects\11\ComplexArrays\Main.jack")
vm.analizar()
print(vm)
for i in range(3,3):
print(i)
```
# Image Classification (CIFAR-10) on Kaggle
:label:`chapter_kaggle_cifar10`
So far, we have been using Gluon's `data` package to directly obtain image data sets in NDArray format. In practice, however, image data sets often exist in the format of image files. In this section, we will start with the original image files and organize, read, and convert the files to NDArray format step by step.
We performed an experiment on the CIFAR-10 data set in :numref:`chapter_image_augmentation`.
This is an important data
set in the computer vision field. Now, we will apply the knowledge we learned in
the previous sections in order to participate in the Kaggle competition, which
addresses CIFAR-10 image classification problems. The competition’s web address
is
> https://www.kaggle.com/c/cifar-10
Figure 11.16 shows the information on the competition's webpage. In order to submit the results, please register an account on the Kaggle website first.

:width:`600px`
First, import the packages or modules required for the competition.
```
import d2l
from mxnet import autograd, gluon, init
from mxnet.gluon import nn
import os
import pandas as pd
import shutil
import time
```
## Obtain and Organize the Data Sets
The competition data is divided into a training set and testing set. The training set contains 50,000 images. The testing set contains 300,000 images, of which 10,000 images are used for scoring, while the other 290,000 non-scoring images are included to prevent the manual labeling of the testing set and the submission of labeling results. The image formats in both data sets are PNG, with heights and widths of 32 pixels and three color channels (RGB). The images cover 10 categories: planes, cars, birds, cats, deer, dogs, frogs, horses, boats, and trucks. The upper-left corner of Figure 11.16 shows some images of planes, cars, and birds in the data set.
### Download the Data Set
After logging in to Kaggle, we can click on the "Data" tab on the CIFAR-10 image classification competition webpage shown in Figure 11.16 and download the training data set "train.7z", the testing data set "test.7z", and the training data set labels "trainLabels.csv".
### Unzip the Data Set
The training data set "train.7z" and the test data set "test.7z" need to be unzipped after downloading. After unzipping the data sets, store the training data set, test data set, and training data set labels in the following respective paths:
* ../data/kaggle_cifar10/train/[1-50000].png
* ../data/kaggle_cifar10/test/[1-300000].png
* ../data/kaggle_cifar10/trainLabels.csv
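Python's standard library cannot read `.7z` archives directly, so one option is to call an external extractor. The sketch below is only illustrative: it assumes the `7z` command-line tool is installed and that the archives were downloaded to `../data/kaggle_cifar10/`.

```
import subprocess

# Extract the two .7z archives with the external 7z tool (assumed to be on PATH).
for archive in ['train.7z', 'test.7z']:
    subprocess.run(['7z', 'x', '../data/kaggle_cifar10/' + archive,
                    '-o../data/kaggle_cifar10/'], check=True)
```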
To make it easier to get started, we provide a small-scale sample of the data set mentioned above. "train_tiny.zip" contains 100 training examples, while "test_tiny.zip" contains only one test example. Their unzipped folder names are "train_tiny" and "test_tiny", respectively. In addition, unzip the zip file of the training data set labels to obtain the file "trainLabels.csv". If you are going to use the full data set of the Kaggle competition, you will also need to change the following `demo` variable to `False`.
```
# If you use the full data set downloaded for the Kaggle competition, change
# the demo variable to False
demo = True
if demo:
import zipfile
for f in ['train_tiny.zip', 'test_tiny.zip', 'trainLabels.csv.zip']:
with zipfile.ZipFile('../data/kaggle_cifar10/' + f, 'r') as z:
z.extractall('../data/kaggle_cifar10/')
```
### Organize the Data Set
We need to organize data sets to facilitate model training and testing. The following `read_label_file` function will be used to read the label file for the training data set. The parameter `valid_ratio` in this function is the ratio of the number of examples in the validation set to the number of examples in the original training set.
```
def read_label_file(data_dir, label_file, train_dir, valid_ratio):
with open(os.path.join(data_dir, label_file), 'r') as f:
# Skip the file header line (column name)
lines = f.readlines()[1:]
tokens = [l.rstrip().split(',') for l in lines]
idx_label = dict(((int(idx), label) for idx, label in tokens))
labels = set(idx_label.values())
n_train_valid = len(os.listdir(os.path.join(data_dir, train_dir)))
n_train = int(n_train_valid * (1 - valid_ratio))
assert 0 < n_train < n_train_valid
return n_train // len(labels), idx_label
```
Below we define a helper function to create a path only if the path does not already exist.
```
# save to the d2l package.
def mkdir_if_not_exist(path):
if not os.path.exists(os.path.join(*path)):
os.makedirs(os.path.join(*path))
```
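As a side note, on Python 3.2 and later the same behavior is available through the `exist_ok` flag of `os.makedirs`. The following is a minimal equivalent sketch, not the book's own code:
```
# Equivalent sketch using the exist_ok flag (Python >= 3.2)
def mkdir_if_not_exist(path):
    os.makedirs(os.path.join(*path), exist_ok=True)
```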
Next, we define the `reorg_train_valid` function to segment the validation set from the original training set. Here, we use `valid_ratio=0.1` as an example. Since the original training set has 50,000 images, there will be 45,000 images used for training and stored in the path “`input_dir/train`” when tuning hyper-parameters, while the other 5,000 images will be stored as validation set in the path “`input_dir/valid`”. After organizing the data, images of the same type will be placed under the same folder so that we can read them later.
```
def reorg_train_valid(data_dir, train_dir, input_dir, n_train_per_label,
idx_label):
label_count = {}
for train_file in os.listdir(os.path.join(data_dir, train_dir)):
idx = int(train_file.split('.')[0])
label = idx_label[idx]
mkdir_if_not_exist([data_dir, input_dir, 'train_valid', label])
shutil.copy(os.path.join(data_dir, train_dir, train_file),
os.path.join(data_dir, input_dir, 'train_valid', label))
if label not in label_count or label_count[label] < n_train_per_label:
mkdir_if_not_exist([data_dir, input_dir, 'train', label])
shutil.copy(os.path.join(data_dir, train_dir, train_file),
os.path.join(data_dir, input_dir, 'train', label))
label_count[label] = label_count.get(label, 0) + 1
else:
mkdir_if_not_exist([data_dir, input_dir, 'valid', label])
shutil.copy(os.path.join(data_dir, train_dir, train_file),
os.path.join(data_dir, input_dir, 'valid', label))
```
The `reorg_test` function below is used to organize the testing set to facilitate the reading during prediction.
```
def reorg_test(data_dir, test_dir, input_dir):
mkdir_if_not_exist([data_dir, input_dir, 'test', 'unknown'])
for test_file in os.listdir(os.path.join(data_dir, test_dir)):
shutil.copy(os.path.join(data_dir, test_dir, test_file),
os.path.join(data_dir, input_dir, 'test', 'unknown'))
```
Finally, we use a function to call the previously defined `read_label_file`, `reorg_train_valid`, and `reorg_test` functions.
```
def reorg_cifar10_data(data_dir, label_file, train_dir, test_dir, input_dir,
valid_ratio):
n_train_per_label, idx_label = read_label_file(data_dir, label_file,
train_dir, valid_ratio)
reorg_train_valid(data_dir, train_dir, input_dir, n_train_per_label,
idx_label)
reorg_test(data_dir, test_dir, input_dir)
```
We use only 100 training examples and one test example here. The folder names for the training and testing data sets are "train_tiny" and "test_tiny", respectively. Accordingly, we only set the batch size to 1. During actual training and testing, the complete data set of the Kaggle competition should be used and `batch_size` should be set to a larger integer, such as 128. We use 10% of the training examples as the validation set for tuning hyper-parameters.
```
if demo:
# Note: Here, we use small training sets and small testing sets and the
# batch size should be set smaller. When using the complete data set for
# the Kaggle competition, the batch size can be set to a large integer
train_dir, test_dir, batch_size = 'train_tiny', 'test_tiny', 1
else:
train_dir, test_dir, batch_size = 'train', 'test', 128
data_dir, label_file = '../data/kaggle_cifar10', 'trainLabels.csv'
input_dir, valid_ratio = 'train_valid_test', 0.1
reorg_cifar10_data(data_dir, label_file, train_dir, test_dir, input_dir,
valid_ratio)
```
## Image Augmentation
To cope with overfitting, we use image augmentation. For example, by adding `transforms.RandomFlipLeftRight()`, the images can be flipped at random. We can also perform normalization for the three RGB channels of color images using `transforms.Normalize()`. Below, we list some of these operations that you can choose to use or modify depending on requirements.
```
transform_train = gluon.data.vision.transforms.Compose([
# Magnify the image to a square of 40 pixels in both height and width
gluon.data.vision.transforms.Resize(40),
# Randomly crop a square image of 40 pixels in both height and width to
# produce a small square of 0.64 to 1 times the area of the original
# image, and then shrink it to a square of 32 pixels in both height and
# width
gluon.data.vision.transforms.RandomResizedCrop(32, scale=(0.64, 1.0),
ratio=(1.0, 1.0)),
gluon.data.vision.transforms.RandomFlipLeftRight(),
gluon.data.vision.transforms.ToTensor(),
# Normalize each channel of the image
gluon.data.vision.transforms.Normalize([0.4914, 0.4822, 0.4465],
[0.2023, 0.1994, 0.2010])])
```
In order to ensure the certainty of the output during testing, we only perform normalization on the image.
```
transform_test = gluon.data.vision.transforms.Compose([
gluon.data.vision.transforms.ToTensor(),
gluon.data.vision.transforms.Normalize([0.4914, 0.4822, 0.4465],
[0.2023, 0.1994, 0.2010])])
```
## Read the Data Set
Next, we can create the `ImageFolderDataset` instance to read the organized data set containing the original image files, where each data instance includes the image and label.
```
# Read the original image file. Flag=1 indicates that the input image has
# three channels (color)
train_ds = gluon.data.vision.ImageFolderDataset(
os.path.join(data_dir, input_dir, 'train'), flag=1)
valid_ds = gluon.data.vision.ImageFolderDataset(
os.path.join(data_dir, input_dir, 'valid'), flag=1)
train_valid_ds = gluon.data.vision.ImageFolderDataset(
os.path.join(data_dir, input_dir, 'train_valid'), flag=1)
test_ds = gluon.data.vision.ImageFolderDataset(
os.path.join(data_dir, input_dir, 'test'), flag=1)
```
We specify the defined image augmentation operation in `DataLoader`. During training, we only use the validation set to evaluate the model, so we need to ensure the certainty of the output. During prediction, we will train the model on the combined training set and validation set to make full use of all labelled data.
```
train_iter = gluon.data.DataLoader(train_ds.transform_first(transform_train),
batch_size, shuffle=True, last_batch='keep')
valid_iter = gluon.data.DataLoader(valid_ds.transform_first(transform_test),
batch_size, shuffle=True, last_batch='keep')
train_valid_iter = gluon.data.DataLoader(train_valid_ds.transform_first(
transform_train), batch_size, shuffle=True, last_batch='keep')
test_iter = gluon.data.DataLoader(test_ds.transform_first(transform_test),
batch_size, shuffle=False, last_batch='keep')
```
## Define the Model
Here, we build the residual blocks based on the HybridBlock class, which is
slightly different than the implementation described in
:numref:`chapter_resnet`. This is done to improve execution efficiency.
```
class Residual(nn.HybridBlock):
def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
super(Residual, self).__init__(**kwargs)
self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1,
strides=strides)
self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
if use_1x1conv:
self.conv3 = nn.Conv2D(num_channels, kernel_size=1,
strides=strides)
else:
self.conv3 = None
self.bn1 = nn.BatchNorm()
self.bn2 = nn.BatchNorm()
def hybrid_forward(self, F, X):
Y = F.relu(self.bn1(self.conv1(X)))
Y = self.bn2(self.conv2(Y))
if self.conv3:
X = self.conv3(X)
return F.relu(Y + X)
```
Next, we define the ResNet-18 model.
```
def resnet18(num_classes):
net = nn.HybridSequential()
net.add(nn.Conv2D(64, kernel_size=3, strides=1, padding=1),
nn.BatchNorm(), nn.Activation('relu'))
def resnet_block(num_channels, num_residuals, first_block=False):
blk = nn.HybridSequential()
for i in range(num_residuals):
if i == 0 and not first_block:
blk.add(Residual(num_channels, use_1x1conv=True, strides=2))
else:
blk.add(Residual(num_channels))
return blk
net.add(resnet_block(64, 2, first_block=True),
resnet_block(128, 2),
resnet_block(256, 2),
resnet_block(512, 2))
net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
return net
```
The CIFAR-10 image classification challenge uses 10 categories. We will perform Xavier random initialization on the model before training begins.
```
def get_net(ctx):
num_classes = 10
net = resnet18(num_classes)
net.initialize(ctx=ctx, init=init.Xavier())
return net
loss = gluon.loss.SoftmaxCrossEntropyLoss()
```
## Define the Training Functions
We will select the model and tune hyper-parameters according to the model's performance on the validation set. Next, we define the model training function `train`. We record the training time of each epoch, which helps us compare the time costs of different models.
```
def train(net, train_iter, valid_iter, num_epochs, lr, wd, ctx, lr_period,
lr_decay):
trainer = gluon.Trainer(net.collect_params(), 'sgd',
{'learning_rate': lr, 'momentum': 0.9, 'wd': wd})
for epoch in range(num_epochs):
train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
if epoch > 0 and epoch % lr_period == 0:
trainer.set_learning_rate(trainer.learning_rate * lr_decay)
for X, y in train_iter:
y = y.astype('float32').as_in_context(ctx)
with autograd.record():
y_hat = net(X.as_in_context(ctx))
l = loss(y_hat, y).sum()
l.backward()
trainer.step(batch_size)
train_l_sum += l.asscalar()
train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
n += y.size
time_s = "time %.2f sec" % (time.time() - start)
if valid_iter is not None:
valid_acc = d2l.evaluate_accuracy_gpu(net, valid_iter)
epoch_s = ("epoch %d, loss %f, train acc %f, valid acc %f, "
% (epoch + 1, train_l_sum / n, train_acc_sum / n,
valid_acc))
else:
epoch_s = ("epoch %d, loss %f, train acc %f, " %
(epoch + 1, train_l_sum / n, train_acc_sum / n))
print(epoch_s + time_s + ', lr ' + str(trainer.learning_rate))
```
## Train and Validate the Model
Now, we can train and validate the model. The following hyper-parameters can be tuned. For example, we can increase the number of epochs. Because `lr_period` and `lr_decay` are set to 80 and 0.1 respectively, the learning rate of the optimization algorithm will be multiplied by 0.1 after every 80 epochs. For simplicity, we only train one epoch here.
```
ctx, num_epochs, lr, wd = d2l.try_gpu(), 1, 0.1, 5e-4
lr_period, lr_decay, net = 80, 0.1, get_net(ctx)
net.hybridize()
train(net, train_iter, valid_iter, num_epochs, lr, wd, ctx, lr_period,
lr_decay)
```
## Classify the Testing Set and Submit Results on Kaggle
After obtaining a satisfactory model design and hyper-parameters, we use all training data sets (including validation sets) to retrain the model and classify the testing set.
```
net, preds = get_net(ctx), []
net.hybridize()
train(net, train_valid_iter, None, num_epochs, lr, wd, ctx, lr_period,
lr_decay)
for X, _ in test_iter:
y_hat = net(X.as_in_context(ctx))
preds.extend(y_hat.argmax(axis=1).astype(int).asnumpy())
sorted_ids = list(range(1, len(test_ds) + 1))
sorted_ids.sort(key=lambda x: str(x))
df = pd.DataFrame({'id': sorted_ids, 'label': preds})
df['label'] = df['label'].apply(lambda x: train_valid_ds.synsets[x])
df.to_csv('submission.csv', index=False)
```
After executing the above code, we will get a "submission.csv" file. The format
of this file is consistent with the Kaggle competition requirements. The method
for submitting results is similar to the method in :numref:`chapter_kaggle_house`.
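Before uploading, a quick sanity check that the file has the expected `id` and `label` columns can be worthwhile (a small sketch, not part of the original code):
```
# Sketch: preview the generated submission file before uploading
import pandas as pd
print(pd.read_csv('submission.csv').head())
```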
## Summary
* We can create an `ImageFolderDataset` instance to read the data set containing the original image files.
* We can use convolutional neural networks, image augmentation, and hybrid programming to take part in an image classification competition.
## Exercises
* Use the complete CIFAR-10 data set for the Kaggle competition. Change the `batch_size` and number of epochs `num_epochs` to 128 and 100, respectively. See what accuracy and ranking you can achieve in this competition.
* What accuracy can you achieve when not using image augmentation?
* Scan the QR code to access the relevant discussions and exchange ideas about the methods used and the results obtained with the community. Can you come up with any better techniques?
## Scan the QR Code to [Discuss](https://discuss.mxnet.io/t/2450)

# Working with code cells
In this notebook you'll get some experience working with code cells.
First, run the cell below. As I mentioned before, you can run the cell by selecting it and clicking the "run cell" button above. However, it's easier to run it by pressing **Shift + Enter** so you don't have to take your hands away from the keyboard.
```
# Select the cell, then press Shift + Enter
3**2
```
Shift + Enter runs the cell then selects the next cell or creates a new one if necessary. You can run a cell without changing the selected cell by pressing **Control + Enter**.
The output shows up below the cell. It's printing out the result just like in a normal Python shell. Only the very last result in a cell will be printed though. Otherwise, you'll need to use `print()` to print out any variables.
> **Exercise:** Run the next two cells to test this out. Think about what you expect to happen, then try it.
```
3**2
4**2
print(3**2)
4**2
```
Now try assigning a value to a variable.
```
mindset = 'growth'
```
There is no output, `'growth'` has been assigned to the variable `mindset`. All variables, functions, and classes created in a cell are available in every other cell in the notebook.
What do you think the output will be when you run the next cell? Feel free to play around with this a bit to get used to how it works.
```
mindset[:4]
```
## Code completion
When you're writing code, you'll often be using a variable or function repeatedly and can save time by using code completion. That is, you only need to type part of the name, then press **tab**.
> **Exercise:** Place the cursor at the end of `mind` in the next cell and press **tab**
```
mindset
```
Here, completing `mind` writes out the full variable name `mindset`. If there are multiple names that start with the same characters, you'll get a menu; see below.
```
# Run this cell
mindful = True
# Complete the name here again, choose one from the menu
mindset
```
Remember that variables assigned in one cell are available in all cells. This includes cells that you've previously run and cells that are above where the variable was assigned. Try doing the code completion on the cell third up from here.
Code completion also comes in handy if you're using a module but don't quite remember which function you're looking for or what the available functions are. I'll show you how this works with the [random](https://docs.python.org/3/library/random.html) module. This module provides functions for generating random numbers, often useful for making fake data or picking random items from lists.
```
# Run this
import random
```
> **Exercise:** In the cell below, place the cursor after `random.` then press **tab** to bring up the code completion menu for the module. Choose `random.randint` from the list; you can move through the menu with the up and down arrow keys.
```
random.ra
```
Above you should have seen all the functions available from the random module. Maybe you're looking to draw random numbers from a [Gaussian distribution](https://en.wikipedia.org/wiki/Normal_distribution), also known as the normal distribution or the "bell curve".
## Tooltips
You see there is the function `random.gauss` but how do you use it? You could check out the [documentation](https://docs.python.org/3/library/random.html), or just look up the documentation in the notebook itself.
> **Exercise:** In the cell below, place the cursor after `random.gauss`, then press **shift + tab** to bring up the tooltip.
```
random.gauss
```
You should have seen some simple documentation like this:
Signature: random.gauss(mu, sigma)
Docstring:
Gaussian distribution.
The function takes two arguments, `mu` and `sigma`. These are the standard symbols for the mean and the standard deviation, respectively, of the Gaussian distribution. Maybe you're not familiar with this though, and you need to know what the parameters actually mean. This will happen often: you'll find some function, but you need more information. You can show more information by pressing **shift + tab** twice.
> **Exercise:** In the cell below, show the full help documentation by pressing **shift + tab** twice.
```
random.gauss
```
You should see more help text like this:
mu is the mean, and sigma is the standard deviation. This is
slightly faster than the normalvariate() function.
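For example, you could draw a few samples where `mu` is 70 and `sigma` is 10 (a quick illustration, not part of the original exercise):
```
# Draw samples from a Gaussian with mean 70 and standard deviation 10
random.gauss(70, 10)
[random.gauss(70, 10) for _ in range(5)]
```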
# Statistics with Python
## Introduction to Python
### GitHub repository: https://github.com/jorgemauricio/python_statistics
### Instructor: Jorge Mauricio
This Jupyter notebook lets you understand the basic principles of the syntax that `Python` uses
**For the moment, the code does not contain any kind of explanation**
### Topics covered
* Data types
* Numbers
* Strings
* Printing
* Lists
* Dictionaries
* Booleans
* Tuples
* Sets
* Comparison operators
* if, elif, else
* for
* while
* range()
* list
* Functions
* lambda
* map and filter
* Methods
## Data Types
### Numbers
```
1 + 1
1 * 3
1 / 2
2 ** 4
4 % 2
5 % 2
(2 + 3) * (5 + 5)
```
### Assigning Variables
```
# Variable names cannot start with a number or a special character
name_of_var = 2
x = 2
y = 3
z = x + y
z
```
### Strings
```
'single quotes'
"double quotes"
" wrap lot's of other quotes"
```
### Print
```
x = 'hello'
x
print(x)
num = 12
name = 'Sam'
print('My number is: {one}, and my name is: {two}'.format(one=num,two=name))
print('My number is: {}, and my name is: {}'.format(num,name))
```
### Lists
```
[1,2,3]
['hi',1,[1,2]]
my_list = ['a','b','c']
my_list.append('d')
my_list
my_list[0]
my_list[1]
my_list[1:]
my_list[:1]
my_list[0] = 'NEW'
my_list
nest = [1,2,3,[4,5,['target']]]
nest[3]
nest[3][2]
nest[3][2][0]
```
### Dictionaries
```
d = {'key1':'item1','key2':'item2'}
d
d['key1']
```
### Booleans
```
True
False
```
### Tuples
```
t = (1,2,3)
t[0]
t[0] = 'NEW'
```
### Sets
```
{1,2,3}
{1,2,3,1,2,1,2,3,3,3,3,2,2,2,1,1,2}
```
## Comparison Operators
```
1 > 2
1 < 2
1 >= 1
1 <= 4
1 == 1
'hi' == 'bye'
```
## Logical Operators
```
(1 > 2) and (2 < 3)
(1 > 2) or (2 < 3)
(1 == 2) or (2 == 3) or (4 == 4)
```
## if, elif, else
```
if 1 < 2:
print('Yep!')
if 1 < 2:
print('yep!')
if 1 < 2:
print('first')
else:
print('last')
if 1 > 2:
print('first')
else:
print('last')
if 1 == 2:
print('first')
elif 3 == 3:
print('middle')
else:
print('Last')
```
## for
```
seq = [1,2,3,4,5]
for item in seq:
print(item)
for item in seq:
print('Yep')
for jelly in seq:
print(jelly+jelly)
```
## while
```
i = 1
while i < 5:
print('i is: {}'.format(i))
i = i+1
```
## range()
```
range(5)
for i in range(5):
print(i)
list(range(5))
```
## list comprehension
```
x = [1,2,3,4]
out = []
for item in x:
out.append(item**2)
print(out)
[item**2 for item in x]
```
## Functions
```
def my_func(param1='default'):
"""
Docstring goes here.
"""
print(param1)
my_func
my_func()
my_func('new param')
my_func(param1='new param')
def square(x):
return x**2
out = square(2)
print(out)
```
## lambda
```
def times2(var):
return var*2
times2(2)
lambda var: var*2
```
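For example (an added illustration, not part of the original notebook), a lambda can be bound to a name and called like a regular function:
```
# Bind the lambda to a name and call it
double = lambda var: var * 2
double(5)  # 10
```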
## map and filter
```
seq = [1,2,3,4,5]
map(times2,seq)
list(map(times2,seq))
list(map(lambda var: var*2,seq))
filter(lambda item: item%2 == 0,seq)
list(filter(lambda item: item%2 == 0,seq))
```
## Methods
```
st = 'hello my name is Sam'
st.lower()
st.upper()
st.split()
tweet = 'Go Sports! #Sports'
tweet.split('#')
tweet.split('#')[1]
d
d.keys()
d.items()
lst = [1,2,3]
lst.pop()
lst
'x' in [1,2,3]
'x' in ['x','y','z']
```
```
# default_exp classifier
#hide
from nbdev import showdoc
```
# Create training data
for the chess board image to FEN app
```
#export
from PIL import Image
from collections import namedtuple
from typing import Tuple
from pathlib import Path
from IPython import display
from enum import Enum
import os
from functools import reduce
from operator import iconcat
#export
Coord = namedtuple('coord', 'file rank')
CropBox = Tuple[float, float, float, float]
```
## Color
```
#export
class Color(Enum):
white = 0
black = 1
```
## Piece
```
#export
class Piece(Enum):
bishop = 0
knight = 1
rook = 2
queen = 3
king = 4
pawn = 5
empty = 6
```
## BoardImage
```
#export
class BoardImage:
""" Represents a perfectly cropped image of a chessboard
Attributes:
path: url of image
image: loaded PIL image from path
"""
image: Image
def __init__(self, path):
""" only works for starting position, with white on bottom"""
self.path = path
self.image = Image.open(path)
_coordinates = {Piece.king: ['e1', 'e8'],
Piece.queen: ['d1', 'd8'],
Piece.knight: ['b1', 'g1', 'b8', 'g8'],
Piece.rook: ['a1', 'h1', 'a8', 'h8'],
Piece.bishop: ['c1', 'f1', 'c8', 'f8'],
Piece.pawn: ['a2', 'b2', 'c2', 'd2', 'e2', 'f2', 'g2', 'h2',
'a7', 'b7', 'c7', 'd7', 'e7', 'f7', 'g7', 'h7'],
Piece.empty: ['a3', 'b3', 'c3', 'd3', 'e3', 'f3', 'g3', 'h3',
'a4', 'b4', 'c4', 'd4', 'e4', 'f4', 'g4', 'h4',
'a5', 'b5', 'c5', 'd5', 'e5', 'f5', 'g5', 'h5',
'a6', 'b6', 'c6', 'd6', 'e6', 'f6', 'g6', 'h6']
}
def _get_crop_box(self, coord: Coord) -> CropBox:
"""
        coord: board coordinate, a8, h3, f4, etc
        output: pixel crop box (left, upper, right, lower) for that square,
                computed from the board image's width and height
"""
width = self.image.width
height = self.image.height
origin_y = (8 - int(coord.rank)) * height / 8
origin_x = int(ord(coord.file) - ord('a')) * width / 8
return (origin_x, origin_y, origin_x + width / 8, origin_y + height / 8)
def get_square(self, coord):
coord = get_coord(coord)
cropbox = self._get_crop_box(coord)
return self.image.crop(cropbox)
def get_pieces(self, piece):
return [self.get_square(_) for _ in self._coordinates[piece]]
    def get_all_pieces(self):
        raise NotImplementedError
    def get_black_pieces(self, color):
        raise NotImplementedError
    def get_white_pieces(self, piece, color):
        raise NotImplementedError
    def get_empty_squares(self):
        raise NotImplementedError
def __repr__(self):
return self.path.as_posix()
```
```
#export
def get_coord(name) -> Coord:
""" name: 2 character coordinate of board square. A8, h3, f4, etc """
file, rank = name.lower()[0], name.lower()[1]
assert file in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
assert rank in ['1', '2', '3', '4', '5', '6', '7', '8']
return Coord(rank=rank, file=file)
board = Path('data/_board/CleanShot 2021-03-28 at 21.36.07@2x.png')
board = Path('/Users/id/nassync/2051-chess-vision-blog-post/board.png')
bi = BoardImage(board)
pieces = reduce(iconcat, [bi.get_pieces(o) for o in Piece], [])
output = Path('data/blogpost/')
if not output.exists():
output.mkdir()
_ = [o.save(output/f"{i}.png") for i, o in enumerate(pieces)]
```
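As a quick illustration of the API (an addition, not part of the original notebook), an individual square can be cropped and displayed directly in the notebook, since `get_square` returns a PIL image:
```
# For example, crop the square that holds the white king in the starting position
bi.get_square('e1')
```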
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os,sys
import plyfile
import numpy as np
import argparse
import h5py
import liblas
args = argparse.Namespace(
# datafolder = '../data/Miami-Dade/LAS',
datafolder = '../data/Amsterdam/Utrecht-test-2'
)
import time
start_time_dict = {}
total_time_dict = {}
current_time_ms = lambda:int(round(time.time()*1000))
def timer_start(msg):
global start_time_dict
global total_time_dict
start_time_dict[msg] = current_time_ms()
if not msg in total_time_dict:
total_time_dict[msg] = 0
def timer_pause(msg):
global start_time_dict
global total_time_dict
total_time_dict[msg] += current_time_ms() - start_time_dict[msg]
def timer_stop(msg):
global total_time_dict
timer_pause(msg)
print("{} completed in {}ms".format(msg, total_time_dict[msg]))
total_time_dict[msg] = 0
def read_xyz_label_from_las(filename_las):
msg = 'Loading {}...'.format(filename_las)
timer_start(msg)
f = liblas.file.File(filename_las, mode='r')
h = f.header
xyzirgb_num = h.point_records_count
xyz = np.ndarray((xyzirgb_num, 3))
labels = np.ndarray(xyzirgb_num, np.int16)
i = 0
for p in f:
xyz[i] = [p.x, p.y, p.z]
labels[i] = p.classification
i += 1
timer_stop(msg)
return xyz, labels, xyzirgb_num
def save_xyz_label_to_las(filename_las, xyz, labels):
msg = 'Saving {}...'.format(filename_las)
timer_start(msg)
h = liblas.header.Header()
h.dataformat_id = 1
h.major = 1
h.minor = 2
h.min = np.min(xyz, axis=0)
h.max = np.max(xyz, axis=0)
h.scale = [1e-3, 1e-3, 1e-3]
f = liblas.file.File(filename_las, mode='w', header=h)
for i in range(xyz.shape[0]):
p = liblas.point.Point()
p.x = xyz[i,0] / h.scale[0]
p.y = xyz[i,1] / h.scale[1]
p.z = xyz[i,2] / h.scale[2]
p.classification = labels[i]
p.color = liblas.color.Color()
p.intensity = 100
p.return_number = 1
p.number_of_returns = 1
p.scan_direction = 1
p.scan_angle = 0
f.write(p)
# if i > 10000:
# break
f.close()
timer_stop(msg)
def get_pred_prefixes(datafolder):
fs = os.listdir(datafolder)
preds = []
for f in fs:
if f[-8:] == '_pred.h5':
preds += [f]
pred_pfx = []
for p in preds:
if '_zero' in p:
pred_pfx += [p.split('_zero')[0]]
return np.unique(pred_pfx)
# parser = argparse.ArgumentParser()
# parser.add_argument('--datafolder', '-d', help='Path to input *_pred.h5', required=True)
# parser.add_argument('--version', '-v', help='full or reduced', type=str, required=True)
# args = parser.parse_args()
# print(args)
SAVE_TO_EXT = '.las'
LOAD_FROM_EXT = '.las'
# SAVE_TO_EXT = '.txt'
# LOAD_FROM_EXT = '.txt'
categories_list = get_pred_prefixes(args.datafolder)
# categories_list = ['amsterdam_las_' + ('0000' + str(category))[-5:] for category in length_dict]
# categories_list = [category for category in length_dict]
print(categories_list)
for category in categories_list:
output_path = os.path.join(args.datafolder,"results",category + "_pred" + SAVE_TO_EXT)
if not os.path.exists(os.path.join(args.datafolder,"results")):
os.makedirs(os.path.join(args.datafolder,"results"))
pred_list = [pred for pred in os.listdir(args.datafolder)
if category in pred and pred.split(".")[0].split("_")[-1] == 'pred']
print('pred_list: {}'.format(pred_list))
# label_length = length_dict[category][0]
# merged_label = np.zeros((label_length),dtype=int)
# merged_confidence = np.zeros((label_length),dtype=float)
merged_label = None
merged_confidence = None
for pred_file in pred_list:
print(os.path.join(args.datafolder, pred_file))
data = h5py.File(os.path.join(args.datafolder, pred_file))
labels_seg = data['label_seg'][...].astype(np.int64)
indices = data['indices_split_to_full'][...].astype(np.int64)
confidence = data['confidence'][...].astype(np.float32)
data_num = data['data_num'][...].astype(np.int64)
# print('labels_seg: {}'.format(labels_seg.shape))
# print('indices: {}'.format(indices.shape))
# print('confidence: {}'.format(confidence.shape))
# print('data_num: {}'.format(data_num.shape))
if merged_label is None:
#calculating how many labels need to be there in the output
label_length = 0
for i in range(indices.shape[0]):
label_length = np.max([label_length, np.max(indices[i][:data_num[i]])])
label_length += 1
merged_label = np.zeros((label_length),dtype=int)
merged_confidence = np.zeros((label_length),dtype=float)
else:
label_length2 = 0
for i in range(indices.shape[0]):
label_length2 = np.max([label_length2, np.max(indices[i][:data_num[i]])])
label_length2 += 1
if label_length < label_length2:
                #expanding labels and confidence arrays, as the new file appears to have more of them
for i in range(label_length2 - label_length):
merged_label = np.append(merged_label, 0)
merged_confidence = np.append(merged_confidence, 0.0)
label_length = label_length2
for i in range(labels_seg.shape[0]):
temp_label = np.zeros((data_num[i]),dtype=int)
pred_confidence = confidence[i][:data_num[i]]
temp_confidence = merged_confidence[indices[i][:data_num[i]]]
temp_label[temp_confidence >= pred_confidence] = merged_label[indices[i][:data_num[i]]][temp_confidence >= pred_confidence]
temp_label[pred_confidence > temp_confidence] = labels_seg[i][:data_num[i]][pred_confidence > temp_confidence]
merged_confidence[indices[i][:data_num[i]][pred_confidence > temp_confidence]] = pred_confidence[pred_confidence > temp_confidence]
merged_label[indices[i][:data_num[i]]] = temp_label
if len(pred_list) > 0:
#concatenating source points with the final labels and writing out resulting file
final_labels = np.ndarray((merged_label.shape[0], 1), np.int64)
final_labels[:,0] = merged_label# + 1
points_path = os.path.join(args.datafolder, category + LOAD_FROM_EXT)
if LOAD_FROM_EXT == ".las":
points, _, _ = read_xyz_label_from_las(points_path)
else:
print('Reading {}'.format(points_path))
points = np.loadtxt(points_path)
if SAVE_TO_EXT == '.las':
save_xyz_label_to_las(output_path, points, final_labels)
else:
final = np.concatenate([points, final_labels], axis=-1)
print('Writing {}'.format(output_path))
np.savetxt(output_path, final, fmt='%1.3f %1.3f %1.3f %i %i')
print('Done.')
```
```
# Importing libraries
import networkx as nx
import matplotlib.pyplot as plt
```
# Erdos_Renyi_Graph
### Task: Create a random erdos_renyi_graph with 100 nodes and an edge probability of 0.3
```
# Create a random erdos-renyi graph with 100 nodes and a edge probability of 0.3
G1 = nx.erdos_renyi_graph(100, 0.3, directed = False)
# title of graph
plt.title('Erdos Renyi Graph with 100 nodes and 0.3 edge probability ')
# plot graph G1
nx.draw(G1, with_labels = True)
```
### Task: Plot the degree_centrality distribution (a histogram of the degree centrality values).
```
# compute the degree centrality of graph G1. The function returns a dictionary keyed by node with bipartite degree centrality as the value.
degree_centrality_values=nx.degree_centrality(G1)
# plot a histogram of the degree centrality values
plt.figure()
plt.hist(list(degree_centrality_values.values()),edgecolor='black')
plt.title('Degree Centrality Values of Erdos Renyi Graph with 100 Nodes and 0.3 Edge Probability',fontdict={'fontsize':15})
plt.xlabel('Degree')
plt.ylabel('Fraction of Nodes')
plt.show()
```
### Task: Change the edge probability of the erdos-renyi graph to 0.6 and plot the degree centrality distribution again.
```
G2 = nx.erdos_renyi_graph(100, 0.6, directed = False)
# plot graph G2
plt.title('Erdos Renyi Graph with 100 nodes and 0.6 edge probability')
nx.draw(G2, with_labels = True)
```
### Task: Plot the degree_centrality distribution (a histogram of the degree centrality values).
```
# compute the degree centrality of graph G2. The function returns a dictionary keyed by node with bipartite degree centrality as the value.
degree_centrality_values2=nx.degree_centrality(G2)
# plot a histogram of the degree centrality values
plt.figure()
plt.hist(list(degree_centrality_values2.values()),edgecolor='black')
plt.title('Degree Centrality Values of Erdos Renyi Graph with 100 Nodes and 0.6 Edge Probability',fontdict={'fontsize':15})
plt.xlabel('Degree')
plt.ylabel('Fraction of Nodes')
plt.show()
```
### Task: Infer the changes in the plots. Why does the degree centrality distribution change? Create a markdown cell in the jupyter notebook and write what you can infer from the plots.
* The degree centrality of a node counts the edges that connect to it (NetworkX normalizes this count by the number of other nodes). The edge probability is the probability of edge creation: the larger the edge probability, the more edges are likely to be created and the higher each node's degree centrality tends to be.
* The difference between the two graphs above is a result of their different edge probabilities: the first graph has the smaller edge probability (p = 0.3) and the second the higher one (p = 0.6), which can be seen in their respective histograms and in the quick check below.
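A minimal numeric check of this inference (an added sketch using the variables already computed above): for an Erdos-Renyi graph the mean degree centrality should be close to the edge probability p, because the expected degree of a node is (n - 1) * p and NetworkX divides each degree by (n - 1).
```
# Quick check: mean degree centrality is close to the edge probability
import numpy as np
print(np.mean(list(degree_centrality_values.values())))   # close to 0.3 for G1
print(np.mean(list(degree_centrality_values2.values())))  # close to 0.6 for G2
```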
### Task: Create a random barabasi_albert_graph with 100 nodes and attach 3 edges to a new node in the graph (m=3).
```
# Create a random barabasi_albert_graph with 100 nodes and attach 3 edges to a new node in the graph (m=3).
G3=nx.barabasi_albert_graph(100, 3)
# title of graph
plt.title('Barabasi Albert Graph with 100 nodes and 3 edges')
# plot graph G3
nx.draw(G3, with_labels = True)
```
### Task: Plot the degree_centrality distribution (a histogram of the degree centrality values).
```
# compute the degree centrality of graph G3. The function returns a dictionary keyed by node with bipartite degree centrality as the value.
degree_centrality_values3=nx.degree_centrality(G3)
# plot a histogram of the degree centrality values
plt.figure()
plt.hist(list(degree_centrality_values3.values()),edgecolor='black')
plt.title('Degree Centrality Values of Barabasi Albert Graph with 100 Nodes and 3 Edges per New Node',fontdict={'fontsize':15})
plt.xlabel('Degree')
plt.ylabel('Fraction of Nodes')
plt.show()
```
### Infer the changes in the plots. Why does the degree centrality distribution change between these 2 random graph generators? Create a markdown cell in the jupyter notebook and write what you can infer from the plots.
#### Difference between erdos_renyi_graph and barabasi_albert_graph.
* According to the NetworkX documentation, the p parameter of the erdos_renyi_graph(n, p, seed=None, directed=False) function is the probability for edge creation, while the m parameter of the barabasi_albert_graph(n, m, seed=None, initial_graph=None) function is the number of edges to attach from a new node to existing nodes.
#### Why does the degree centrality distribution change between the erdos_renyi graph and the barabasi_albert graph?
* In a barabasi_albert graph, new nodes attach preferentially to existing nodes that already have many edges, so a few hub nodes accumulate most of the connections while the majority keep a low degree; in an erdos_renyi graph, each possible edge is created independently with the same probability. This causes the degree centrality distribution of the barabasi_albert_graph() function to be right-skewed, with fewer edges added overall and only a few nodes of high importance, whereas the erdos_renyi graph's distribution is closer to a normal distribution.
## Observations and Insights
Observations & inferences:
1) The drug regimens Capomulin & Ramicane were tested more than the other drug regimens; all other regimens were tested a roughly equal number of times.
2) The final tumor volume distributions for mice tested with the drug regimens Capomulin, Ramicane & Ceftamin look normal - there are no significant outliers. Infubinol, on the other hand, does have outliers, as noted in the boxplot below and in the computed potential outliers.
3) Weight and average tumor volume for mice tested with Capomulin have a positive correlation - as mouse weight increases, so does the average tumor volume.
## Dependencies and starter code
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
from scipy.stats import linregress
# Study data files
mouse_metadata = "data/Mouse_metadata.csv"
study_results = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata)
study_results = pd.read_csv(study_results)
# Combine the data into a single dataset
all_data = pd.merge(mouse_metadata, study_results, how="left", on=["Mouse ID", "Mouse ID"])
all_data.head(15)
```
## Summary statistics
```
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
tumor_data_mean = all_data.groupby("Drug Regimen").mean()["Tumor Volume (mm3)"]
tumor_data_median = all_data.groupby("Drug Regimen").median()["Tumor Volume (mm3)"]
tumor_data_variance = all_data.groupby("Drug Regimen").var()["Tumor Volume (mm3)"]
tumor_data_std = all_data.groupby("Drug Regimen").std()["Tumor Volume (mm3)"]
tumor_data_SEM = all_data.groupby("Drug Regimen").sem()["Tumor Volume (mm3)"]
tumor_data_summary = pd.DataFrame({"Tumor Volume Mean":tumor_data_mean,
"Tumor Volume Median":tumor_data_median,
"Tumor Volume Variance":tumor_data_variance,
"Tumor Volume Std Dev":tumor_data_std,
"Tumor Volume SEM":tumor_data_SEM})
tumor_data_summary
```
## Bar plots
```
# Generate a bar plot showing number of data points for each treatment regimen using pandas
x_axis = all_data["Drug Regimen"].unique()
y_axis = all_data["Drug Regimen"].value_counts()
drug_data = pd.DataFrame({"Drug Regimen": x_axis, "Number of Data Points": y_axis})
drug_data = drug_data.set_index("Drug Regimen")
drug_data.plot(kind="bar", facecolor="blue", alpha=0.5, figsize=(15,5))
plt.title("Number of Data Points per Drug Regimen")
plt.show()
# Generate a bar plot showing number of data points for each treatment regimen using pyplot
x_axis = all_data["Drug Regimen"].unique()
y_axis = all_data["Drug Regimen"].value_counts()
plt.figure(figsize=(15,5))
plt.bar(x_axis, y_axis, color="b", alpha=0.5, align="center")
plt.title("Number of Data Points per Drug Regimen")
plt.xlabel("Drug Regimen")
plt.ylabel("Number of Data Points")
plt.show()
```
## Pie plots
```
# Generate a pie plot showing the distribution of female versus male mice using pandas
sex_type = all_data["Sex"].unique()
sex_count = all_data["Sex"].value_counts()
mice_pie=sex_count.plot(kind="pie", y=sex_type, autopct="%1.1f%%", title="Sex of Mice")
mice_pie.set_ylabel("Sex")
plt.tight_layout()
plt.axis("equal")
# Generate a pie plot showing the distribution of female versus male mice using pyplot
sex_type = all_data["Sex"].unique()
sex_count = all_data["Sex"].value_counts()
colors = ["orange", "blue"]
explode = (0.1,0)
plt.pie(sex_count, explode=explode, labels=sex_type, colors=colors, autopct="%1.1f%%", shadow=True, startangle=140)
plt.axis("equal")
plt.title("Sex of Mice")
plt.ylabel("Sex")
```
## Quartiles, outliers and boxplots
```
# Calculate the final tumor volume of each mouse across four of the most promising treatment regimens.
# Calculate the IQR and quantitatively determine if there are any potential outliers.
# Most promising treatment regimens: Capomulin, Ramicane, Infubinol, and Ceftamin
final_volume = all_data.groupby(["Mouse ID"]).max()
final_volume = final_volume.reset_index()
merged_data = final_volume[["Mouse ID","Timepoint"]].merge(all_data,on=["Mouse ID","Timepoint"],how="left")
capomulin_data = merged_data.loc[(merged_data["Drug Regimen"]=="Capomulin")]["Tumor Volume (mm3)"]
ramicane_data = merged_data.loc[(merged_data["Drug Regimen"]=="Ramicane")]["Tumor Volume (mm3)"]
infubinol_data = merged_data.loc[(merged_data["Drug Regimen"]=="Infubinol")]["Tumor Volume (mm3)"]
ceftamin_data = merged_data.loc[(merged_data["Drug Regimen"]=="Ceftamin")]["Tumor Volume (mm3)"]
#Capomulin
cap_quartiles = capomulin_data.quantile([.25, .5, .75])
cap_lowerq = cap_quartiles[0.25]
cap_upperq = cap_quartiles[0.75]
cap_iqr = cap_upperq - cap_lowerq
cap_lower_bound = cap_lowerq - (1.5*cap_iqr)
cap_upper_bound = cap_upperq + (1.5*cap_iqr)
print(f"Capomulin IQR is {cap_iqr}")
print(f"Capomulin potential outliers: {capomulin_data.loc[(capomulin_data < cap_lower_bound) | (capomulin_data > cap_upper_bound)]}")
#Ramicane
ram_quartiles = ramicane_data.quantile([.25, .5, .75])
ram_lowerq = ram_quartiles[0.25]
ram_upperq = ram_quartiles[0.75]
ram_iqr = ram_upperq - ram_lowerq
ram_lower_bound = ram_lowerq - (1.5*ram_iqr)
ram_upper_bound = ram_upperq + (1.5*ram_iqr)
print(f"Ramicane IQR is {ram_iqr}")
print(f"Ramicane potential outliers: {ramicane_data.loc[(ramicane_data < ram_lower_bound) | (ramicane_data > ram_upper_bound)]}")
#Infubinol
inf_quartiles = infubinol_data.quantile([.25, .5, .75])
inf_lowerq = inf_quartiles[0.25]
inf_upperq = inf_quartiles[0.75]
inf_iqr = inf_upperq - inf_lowerq
inf_lower_bound = inf_lowerq - (1.5*inf_iqr)
inf_upper_bound = inf_upperq + (1.5*inf_iqr)
print(f"Infubinol IQR is {inf_iqr}")
print(f"Infubinol potential outliers: {infubinol_data.loc[(infubinol_data < inf_lower_bound) | (infubinol_data > inf_upper_bound)]}")
#Ceftamin
ceft_quartiles = ceftamin_data.quantile([.25, .5, .75])
ceft_lowerq = ceft_quartiles[0.25]
ceft_upperq = ceft_quartiles[0.75]
ceft_iqr = ceft_upperq - ceft_lowerq
ceft_lower_bound = ceft_lowerq - (1.5*ceft_iqr)
ceft_upper_bound = ceft_upperq + (1.5*ceft_iqr)
print(f"Ceftamine IQR is {ceft_iqr}")
print(f"Ceftamin potential outliers: {ceftamin_data.loc[(ceftamin_data < ceft_lower_bound) | (ceftamin_data > ceft_upper_bound)]}")
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# Highlight any potential outliers in the plot by changing their color and style.
# Most promising treatment regimens: Capomulin, Ramicane, Infubinol, and Ceftamin
outlier_color = dict(markerfacecolor='red',markersize=12)
fig1, ax1 = plt.subplots()
ax1.set_title("Final Tumor Volume per Mouse Across Top 4 Drug Regimens")
ax1.set_ylabel("Final Tumor Volume (mm3)")
ax1.boxplot([capomulin_data,ramicane_data,infubinol_data,ceftamin_data],
labels=["Capomulin", "Ramicane", "Infubinol", "Ceftamine"],
flierprops=outlier_color)
plt.show()
```
## Line and scatter plots
```
# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin
capomulin_data = all_data.loc[(all_data["Drug Regimen"]=="Capomulin")]
mouse_track = capomulin_data.loc[(capomulin_data["Mouse ID"]=="s185")]
x_axis = mouse_track["Timepoint"]
y_axis = mouse_track["Tumor Volume (mm3)"]
plt.plot(x_axis, y_axis)
plt.xlabel("Timepoint")
plt.ylabel("Tumor Volume (mm3)")
plt.title(f'Capomulin Impact on Mouse ID s185 Tumor Volume over Time')
plt.show()
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
capomulin_data = all_data.loc[(all_data["Drug Regimen"]=="Capomulin")]
mouse_ID = capomulin_data.groupby("Mouse ID").mean()
x_axis = mouse_ID["Weight (g)"]
y_axis = mouse_ID["Tumor Volume (mm3)"]
plt.scatter(x_axis, y_axis, marker="o", facecolors="blue", s=x_axis, alpha = 0.75)
plt.xlabel("Mouse Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.title(f'Capomulin Impact on Average Tumor Volume per Mouse Weight')
# Calculate the correlation coefficient and linear regression model for mouse weight and average tumor volume for the Capomulin regimen
capomulin_data = all_data.loc[(all_data["Drug Regimen"]=="Capomulin")]
mouse_ID = capomulin_data.groupby("Mouse ID").mean()
x_axis = mouse_ID["Weight (g)"]
y_axis = mouse_ID["Tumor Volume (mm3)"]
correlation = st.pearsonr(x_axis, y_axis)
print(f"The correlation between Weight & Average Tumor Volume is {round(correlation[0],2)}")
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_axis, y_axis)
regress_values = x_axis * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_axis, y_axis, marker="o", facecolors="blue", s=x_axis, alpha = 0.75)
plt.plot(x_axis, regress_values, "r-")
plt.annotate(line_eq,(18,38),fontsize=15, color="red")
plt.xlabel("Mouse Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.title(f'Capomulin Impact on Average Tumor Volume per Mouse Weight')
plt.show()
```
|
github_jupyter
|
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
from scipy.stats import linregress
# Study data files
mouse_metadata = "data/Mouse_metadata.csv"
study_results = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata)
study_results = pd.read_csv(study_results)
# Combine the data into a single dataset
all_data = pd.merge(mouse_metadata, study_results, how="left", on="Mouse ID")
all_data.head(15)
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
tumor_data_mean = all_data.groupby("Drug Regimen").mean()["Tumor Volume (mm3)"]
tumor_data_median = all_data.groupby("Drug Regimen").median()["Tumor Volume (mm3)"]
tumor_data_variance = all_data.groupby("Drug Regimen").var()["Tumor Volume (mm3)"]
tumor_data_std = all_data.groupby("Drug Regimen").std()["Tumor Volume (mm3)"]
tumor_data_SEM = all_data.groupby("Drug Regimen").sem()["Tumor Volume (mm3)"]
tumor_data_summary = pd.DataFrame({"Tumor Volume Mean":tumor_data_mean,
"Tumor Volume Median":tumor_data_median,
"Tumor Volume Variance":tumor_data_variance,
"Tumor Volume Std Dev":tumor_data_std,
"Tumor Volume SEM":tumor_data_SEM})
tumor_data_summary
# Generate a bar plot showing number of data points for each treatment regimen using pandas
x_axis = all_data["Drug Regimen"].unique()
y_axis = all_data["Drug Regimen"].value_counts()
drug_data = pd.DataFrame({"Drug Regimen": x_axis, "Number of Data Points": y_axis})
drug_data = drug_data.set_index("Drug Regimen")
drug_data.plot(kind="bar", facecolor="blue", alpha=0.5, figsize=(15,5))
plt.title("Number of Data Points per Drug Regimen")
plt.show()
# Generate a bar plot showing number of data points for each treatment regimen using pyplot
x_axis = all_data["Drug Regimen"].unique()
y_axis = all_data["Drug Regimen"].value_counts()
plt.figure(figsize=(15,5))
plt.bar(x_axis, y_axis, color="b", alpha=0.5, align="center")
plt.title("Number of Data Points per Drug Regimen")
plt.xlabel("Drug Regimen")
plt.ylabel("Number of Data Points")
plt.show()
# Generate a pie plot showing the distribution of female versus male mice using pandas
sex_type = all_data["Sex"].unique()
sex_count = all_data["Sex"].value_counts()
mice_pie=sex_count.plot(kind="pie", y=sex_type, autopct="%1.1f%%", title="Sex of Mice")
mice_pie.set_ylabel("Sex")
plt.tight_layout()
plt.axis("equal")
# Generate a pie plot showing the distribution of female versus male mice using pyplot
sex_type = all_data["Sex"].unique()
sex_count = all_data["Sex"].value_counts()
colors = ["orange", "blue"]
explode = (0.1,0)
plt.pie(sex_count, explode=explode, labels=sex_type, colors=colors, autopct="%1.1f%%", shadow=True, startangle=140)
plt.axis("equal")
plt.title("Sex of Mice")
plt.ylabel("Sex")
# Calculate the final tumor volume of each mouse across four of the most promising treatment regimens.
# Calculate the IQR and quantitatively determine if there are any potential outliers.
# Most promising treatment regimens: Capomulin, Ramicane, Infubinol, and Ceftamin
final_volume = all_data.groupby(["Mouse ID"]).max()
final_volume = final_volume.reset_index()
merged_data = final_volume[["Mouse ID","Timepoint"]].merge(all_data,on=["Mouse ID","Timepoint"],how="left")
capomulin_data = merged_data.loc[(merged_data["Drug Regimen"]=="Capomulin")]["Tumor Volume (mm3)"]
ramicane_data = merged_data.loc[(merged_data["Drug Regimen"]=="Ramicane")]["Tumor Volume (mm3)"]
infubinol_data = merged_data.loc[(merged_data["Drug Regimen"]=="Infubinol")]["Tumor Volume (mm3)"]
ceftamin_data = merged_data.loc[(merged_data["Drug Regimen"]=="Ceftamin")]["Tumor Volume (mm3)"]
#Capomulin
cap_quartiles = capomulin_data.quantile([.25, .5, .75])
cap_lowerq = cap_quartiles[0.25]
cap_upperq = cap_quartiles[0.75]
cap_iqr = cap_upperq - cap_lowerq
cap_lower_bound = cap_lowerq - (1.5*cap_iqr)
cap_upper_bound = cap_upperq + (1.5*cap_iqr)
print(f"Capomulin IQR is {cap_iqr}")
print(f"Capomulin potential outliers: {capomulin_data.loc[(capomulin_data < cap_lower_bound) | (capomulin_data > cap_upper_bound)]}")
#Ramicane
ram_quartiles = ramicane_data.quantile([.25, .5, .75])
ram_lowerq = ram_quartiles[0.25]
ram_upperq = ram_quartiles[0.75]
ram_iqr = ram_upperq - ram_lowerq
ram_lower_bound = ram_lowerq - (1.5*ram_iqr)
ram_upper_bound = ram_upperq + (1.5*ram_iqr)
print(f"Ramicane IQR is {ram_iqr}")
print(f"Ramicane potential outliers: {ramicane_data.loc[(ramicane_data < ram_lower_bound) | (ramicane_data > ram_upper_bound)]}")
#Infubinol
inf_quartiles = infubinol_data.quantile([.25, .5, .75])
inf_lowerq = inf_quartiles[0.25]
inf_upperq = inf_quartiles[0.75]
inf_iqr = inf_upperq - inf_lowerq
inf_lower_bound = inf_lowerq - (1.5*inf_iqr)
inf_upper_bound = inf_upperq + (1.5*inf_iqr)
print(f"Infubinol IQR is {inf_iqr}")
print(f"Infubinol potential outliers: {infubinol_data.loc[(infubinol_data < inf_lower_bound) | (infubinol_data > inf_upper_bound)]}")
#Ceftamin
ceft_quartiles = ceftamin_data.quantile([.25, .5, .75])
ceft_lowerq = ceft_quartiles[0.25]
ceft_upperq = ceft_quartiles[0.75]
ceft_iqr = ceft_upperq - ceft_lowerq
ceft_lower_bound = ceft_lowerq - (1.5*ceft_iqr)
ceft_upper_bound = ceft_upperq + (1.5*ceft_iqr)
print(f"Ceftamine IQR is {ceft_iqr}")
print(f"Ceftamin potential outliers: {ceftamin_data.loc[(ceftamin_data < ceft_lower_bound) | (ceftamin_data > ceft_upper_bound)]}")
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# Highlight any potential outliers in the plot by changing their color and style.
# Most promising treatment regimens: Capomulin, Ramicane, Infubinol, and Ceftamin
outlier_color = dict(markerfacecolor='red',markersize=12)
fig1, ax1 = plt.subplots()
ax1.set_title("Final Tumor Volume per Mouse Across Top 4 Drug Regimens")
ax1.set_ylabel("Final Tumor Volume (mm3)")
ax1.boxplot([capomulin_data,ramicane_data,infubinol_data,ceftamin_data],
labels=["Capomulin", "Ramicane", "Infubinol", "Ceftamine"],
flierprops=outlier_color)
plt.show()
# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin
capomulin_data = all_data.loc[(all_data["Drug Regimen"]=="Capomulin")]
mouse_track = capomulin_data.loc[(capomulin_data["Mouse ID"]=="s185")]
x_axis = mouse_track["Timepoint"]
y_axis = mouse_track["Tumor Volume (mm3)"]
plt.plot(x_axis, y_axis)
plt.xlabel("Timepoint")
plt.ylabel("Tumor Volume (mm3)")
plt.title(f'Capomulin Impact on Mouse ID s185 Tumor Volume over Time')
plt.show()
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
capomulin_data = all_data.loc[(all_data["Drug Regimen"]=="Capomulin")]
mouse_ID = capomulin_data.groupby("Mouse ID").mean()
x_axis = mouse_ID["Weight (g)"]
y_axis = mouse_ID["Tumor Volume (mm3)"]
plt.scatter(x_axis, y_axis, marker="o", facecolors="blue", s=x_axis, alpha = 0.75)
plt.xlabel("Mouse Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.title(f'Capomulin Impact on Average Tumor Volume per Mouse Weight')
# Calculate the correlation coefficient and linear regression model for mouse weight and average tumor volume for the Capomulin regimen
capomulin_data = all_data.loc[(all_data["Drug Regimen"]=="Capomulin")]
mouse_ID = capomulin_data.groupby("Mouse ID").mean()
x_axis = mouse_ID["Weight (g)"]
y_axis = mouse_ID["Tumor Volume (mm3)"]
correlation = st.pearsonr(x_axis, y_axis)
print(f"The correlation between Weight & Average Tumor Volume is {round(correlation[0],2)}")
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_axis, y_axis)
regress_values = x_axis * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_axis, y_axis, marker="o", facecolors="blue", s=x_axis, alpha = 0.75)
plt.plot(x_axis, regress_values, "r-")
plt.annotate(line_eq,(18,38),fontsize=15, color="red")
plt.xlabel("Mouse Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.title(f'Capomulin Impact on Average Tumor Volume per Mouse Weight')
plt.show()
| 0.606265 | 0.980858 |
```
from libraries.import_export_data_objects import import_export_data as Import_Export_Data
from libraries.altair_renderings import AltairRenderings
from libraries.utility import Utility
import os
import altair as alt
my_altair = AltairRenderings()
my_altair.my_new_map()
from vega_datasets import data  # 'data' is otherwise undefined at this point
source = alt.topo_feature(data.world_110m.url, 'countries')
my_map=alt.Chart(source).mark_geoshape(fill='blue',stroke='grey',).encode(tooltip='id:N').project('naturalEarth1').properties(width=800, height=600).configure_view(stroke=None)
!pip install geopandas
!pip install gpdvega
!pip install configparser
import altair as alt
import geopandas as gpd
import gpdvega
import numpy as np
import pandas as pd  # needed for pd.DataFrame / pd.merge below
import matplotlib.pyplot as plt
import configparser
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
interested_countries = ['Australia','Brazil','Canada','China','France','Germany','India','Indonesia','Iran','Italy','Japan',
'Mexico','Netherlands','Russia','Saudi Arabia','South Korea','Spain','Switzerland','United Kingdom','United States of America']
gdp = list(range(100,300,10))
country_gdp = pd.DataFrame(
{'name': interested_countries,
'GDP': gdp
})
world_gdp = pd.merge(world, country_gdp, on='name', how = 'outer')
world_gdp['GDP'] = world_gdp['GDP'].fillna(0)
world_gdp.head()
# GeoDataFrame could be passed as usual pd.DataFrame
chart = alt.Chart(world_gdp[world_gdp.continent!='Antarctica']).mark_geoshape(
).project(
).encode(
color='GDP',
tooltip='name'
).properties(
width=700,
height=500
)
chart
from libraries.import_export_data_objects import import_export_data as Import_Export_Data
from libraries.altair_renderings import AltairRenderings
from libraries.utility import Utility
import os
import altair as alt
import pandas as pd
my_altair = AltairRenderings()
my_data = Import_Export_Data()
all_gdp=my_data.get_gdp_all_data()
year2020 = all_gdp[all_gdp['Year'] == 2020]
top20_2020 = year2020.sort_values(['GDP'], ascending=False).head(20)[['Country','GDP']]
len(top20_2020)
from vega_datasets import data
world_map_source = alt.topo_feature(data.world_110m.url, 'countries')
country_source = my_data.get_world_countries_by_iso_label()
country_source.loc[84,'Country'] = 'South Korea'
country_source = country_source.drop(4)
country_gdp = pd.merge(country_source, top20_2020, on='Country', how = 'outer')
country_gdp['GDP'] = country_gdp['GDP'].fillna(0)
chart = alt.Chart(world_map_source).mark_geoshape(
stroke='black',
strokeWidth=1.10
).project('mercator'
).encode(
color = alt.condition(
'datum.GDP > 0',
alt.Color('GDP:Q',legend=alt.Legend(title="County GDP in $MM")),
alt.value('lightgrey')
),
tooltip=[alt.Tooltip("Country:N", title="Country")]
).transform_lookup(
lookup="id",
from_=alt.LookupData(country_gdp, "id", ["Country",'GDP']),
).properties(
width=700,
height=500
)
chart
```
|
github_jupyter
|
from libraries.import_export_data_objects import import_export_data as Import_Export_Data
from libraries.altair_renderings import AltairRenderings
from libraries.utility import Utility
import os
import altair as alt
my_altair = AltairRenderings()
my_altair.my_new_map()
from vega_datasets import data  # 'data' is otherwise undefined at this point
source = alt.topo_feature(data.world_110m.url, 'countries')
my_map=alt.Chart(source).mark_geoshape(fill='blue',stroke='grey',).encode(tooltip='id:N').project('naturalEarth1').properties(width=800, height=600).configure_view(stroke=None)
!pip install geopandas
!pip install gpdvega
!pip install configparser
import altair as alt
import geopandas as gpd
import gpdvega
import numpy as np
import pandas as pd  # needed for pd.DataFrame / pd.merge below
import matplotlib.pyplot as plt
import configparser
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
interested_countries = ['Australia','Brazil','Canada','China','France','Germany','India','Indonesia','Iran','Italy','Japan',
'Mexico','Netherlands','Russia','Saudi Arabia','South Korea','Spain','Switzerland','United Kingdom','United States of America']
gdp = list(range(100,300,10))
country_gdp = pd.DataFrame(
{'name': interested_countries,
'GDP': gdp
})
world_gdp = pd.merge(world, country_gdp, on='name', how = 'outer')
world_gdp['GDP'] = world_gdp['GDP'].fillna(0)
world_gdp.head()
# GeoDataFrame could be passed as usual pd.DataFrame
chart = alt.Chart(world_gdp[world_gdp.continent!='Antarctica']).mark_geoshape(
).project(
).encode(
color='GDP',
tooltip='name'
).properties(
width=700,
height=500
)
chart
from libraries.import_export_data_objects import import_export_data as Import_Export_Data
from libraries.altair_renderings import AltairRenderings
from libraries.utility import Utility
import os
import altair as alt
import pandas as pd
my_altair = AltairRenderings()
my_data = Import_Export_Data()
all_gdp=my_data.get_gdp_all_data()
year2020 = all_gdp[all_gdp['Year'] == 2020]
top20_2020 = year2020.sort_values(['GDP'], ascending=False).head(20)[['Country','GDP']]
len(top20_2020)
from vega_datasets import data
world_map_source = alt.topo_feature(data.world_110m.url, 'countries')
country_source = my_data.get_world_countries_by_iso_label()
country_source.loc[84,'Country'] = 'South Korea'
country_source = country_source.drop(4)
country_gdp = pd.merge(country_source, top20_2020, on='Country', how = 'outer')
country_gdp['GDP'] = country_gdp['GDP'].fillna(0)
chart = alt.Chart(world_map_source).mark_geoshape(
stroke='black',
strokeWidth=1.10
).project('mercator'
).encode(
color = alt.condition(
'datum.GDP > 0',
alt.Color('GDP:Q',legend=alt.Legend(title="County GDP in $MM")),
alt.value('lightgrey')
),
tooltip=[alt.Tooltip("Country:N", title="Country")]
).transform_lookup(
lookup="id",
from_=alt.LookupData(country_gdp, "id", ["Country",'GDP']),
).properties(
width=700,
height=500
)
chart
| 0.374219 | 0.303664 |
```
%load_ext autoreload
import pandas as pd
%autoreload 2
import sys
sys.path.append("/Users/timothyxp/libs/ppscore/src/")
import ppscore as pps
import warnings
import seaborn as sns
import numpy as np
sns.set()
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
df = pd.read_csv('autos.csv', parse_dates=['dateCreated', 'lastSeen'])
print(df.shape)
df.head()
%%time
pps_matrix = pps.matrix(df, verbose=True, n_jobs=-1)
ax, fig = plt.subplots(figsize=(10, 10))
sns.heatmap(
pps_matrix.pivot("y", "x", "ppscore"),
vmin=0,
vmax=1,
cmap="Blues",
linewidths=0.5
)
```
Apart from the obvious relationships, the features fuelType, gearbox, powerPS and vehicleType are predicted well from the model column,
because they do not have that many unique values.
A strong dependency with price shows up for only 6 features. One of them is model, which has 250 unique values (a direct way to rank the price predictors is sketched below).
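A hedged shortcut (the column name "price" is assumed from the usual autos.csv schema and is not shown above): `pps.predictors` ranks every feature by how well it alone predicts a chosen target, which is a more direct way to read off the strongest price predictors than scanning the heat map.
```
# illustrative only: "price" is an assumed column name for this dataset
price_predictors = pps.predictors(df, "price", verbose=True)
price_predictors.head(6)
```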
Let's check on synthetic data how well it catches non-linear dependencies.
```
test_df = pd.DataFrame(np.arange(-50, 50), columns=["feature_0"])
test_df["feature_1"] = test_df["feature_0"] ** 2
test_df['feature_2'] = test_df['feature_0'] ** 3
nonl_df = pps.nonlinear_features(test_df, plot_nonlinear_features=True)
fraud_df = pd.read_csv("datasets/santander_value_pred/TRAIN.csv")
print(fraud_df.shape)
fraud_df.head()
predictors = pps.predictors(fraud_df, 'TARGET', verbose=True)
predictors.head()
predictors.ppscore.mean()
```
For the fraud dataset it finds essentially nothing; most likely the features here matter in combination rather than individually.
```
house_prices = pd.read_csv("datasets/house_prices/TRAIN.csv")
print(house_prices.shape)
house_prices.head()
{col:(house_prices[col].unique().shape[0], house_prices.dtypes[col]) for col in house_prices}
```
Some variables that are essentially categorical are stored as int; going through all of them by hand is tedious, so let's treat a column as categorical whenever it has at most 20 unique values.
```
def cast_to_cat(df, border=20):
for col in df:
if df.dtypes[col].kind == 'i' and np.unique(df[col]).shape[0] <= 20:
df[col] = df[col].astype('category')
return df
house_prices = house_prices.pipe(cast_to_cat)
predictors = pps.predictors(house_prices, 'TARGET', verbose=True)
```
In essence we get something like a feature importance ranking before building any model.
Of course, this does not mean that features with pps=0 are useless for a model - they may only reveal themselves in combination with other features, which ppscore does not take into account (a small illustration follows below).
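As a quick illustration of that limitation (purely synthetic data, not one of the datasets above): in an XOR setup each feature is useless on its own, so its ppscore is near zero, while a model that sees both features solves the task almost perfectly.
```
# synthetic XOR example: individually uninformative features, jointly predictive target
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
xor_df = pd.DataFrame({"f1": rng.integers(0, 2, 1000),
                       "f2": rng.integers(0, 2, 1000)})
xor_df["target"] = (xor_df["f1"] ^ xor_df["f2"]).astype("category")

# each single feature predicts the target no better than the naive baseline
print(pps.score(xor_df, "f1", "target")["ppscore"])
print(pps.score(xor_df, "f2", "target")["ppscore"])

# a shallow tree that sees both features gets near-perfect cross-validated accuracy
tree = DecisionTreeClassifier(max_depth=3, random_state=0)
print(cross_val_score(tree, xor_df[["f1", "f2"]], xor_df["target"].astype(int), cv=5).mean())
```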
```
fig, ax = plt.subplots(figsize=(16, 8))
sns.barplot(data=predictors[predictors.ppscore.gt(0)], x="x", y="ppscore", ax=ax)
plt.xticks(rotation=60);
%%time
good_features = predictors[predictors.ppscore.gt(0)].x.tolist()[:20] + ["TARGET"]
print(len(good_features))
pps_matrix = pps.matrix(house_prices[good_features], n_jobs=-1)
fig, ax = plt.subplots(figsize=(14, 14))
sns.heatmap(
pps_matrix.pivot("y", "x", "ppscore"),
vmin=0,
vmax=1,
cmap="Blues",
linewidths=0.5,
ax=ax
)
```
Let's look at a few distributions where, judging by the matrix, there is a strong dependency.
First, the dependency of the foundation material type on the year built.
```
sns.jointplot(
data=house_prices,
y='Foundation',
x='YearBuilt'
)
```
The dependency of the year built on the neighborhood - you can see when each neighborhood was being developed.
```
sns.jointplot(
data=house_prices,
x='YearBuilt',
y='Neighborhood',
height=12
)
nonl_df = pps.nonlinear_features(house_prices[good_features], plot_nonlinear_features=True, pos_border=0.4)
```
There are many features where the correlation is strong but the noise is very large, so ppscore does not see the dependencies (a small synthetic illustration of this effect follows below).
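A small synthetic sketch of that effect (illustrative values, not taken from the house-prices data): a linear signal buried in heavy noise keeps a noticeable Pearson correlation, while ppscore - which asks how well x alone predicts y - tends to end up much closer to zero.
```
# illustrative check: linear relation with noise ~3x the signal's standard deviation
rng = np.random.default_rng(42)
x = rng.normal(size=2000)
noisy = pd.DataFrame({"x": x, "y": x + rng.normal(scale=3, size=2000)})
print("pearson r:", round(noisy["x"].corr(noisy["y"]), 2))
print("ppscore  :", round(pps.score(noisy, "x", "y")["ppscore"], 2))
```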
```
titanic_df = pd.read_csv("datasets/titanic/train.csv")
print(titanic_df.shape)
titanic_df.head()
```
Here it is important to declare the categorical variables explicitly, because otherwise it may decide a column is a regression target and find no dependency even though one may exist.
```
cat_features = ['SibSp', 'Parch', 'Ticket', 'Cabin', 'Embarked', 'Pclass']
for col in cat_features:
titanic_df[col] = titanic_df[col].astype('category')
%%time
pps_matrix = pps.matrix(titanic_df, verbose=True, n_jobs=-1)
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(
pps_matrix.pivot("y", "x", "ppscore"),
vmin=0,
vmax=1,
cmap="Blues",
linewidths=0.5,
ax=ax
)
```
In principle, the target clearly depends on sex and on the ticket (the latter is most likely a leak, because there are very many unique ticket values).
Clear dependencies of the passenger class on the ticket fare are also visible, as well as a dependency of the port of embarkation on the fare.
```
sns.jointplot(
data=titanic_df.dropna(subset=['Embarked']),
y='Embarked',
x='Fare',
height=8
)
```
Let's also look at the correlation matrix (numeric features only).
```
ax, fig = plt.subplots(figsize=(10, 10))
sns.heatmap(
titanic_df.corr(),
vmin=0,
vmax=1,
cmap="Blues",
linewidths=0.5
)
nonl_df = pps.nonlinear_features(titanic_df, plot_nonlinear_features=True, pos_border=0.1)
```
No non-linear features were found here; there are few numeric features to begin with.
|
github_jupyter
|
%load_ext autoreload
import pandas as pd
%autoreload 2
import sys
sys.path.append("/Users/timothyxp/libs/ppscore/src/")
import ppscore as pps
import warnings
import seaborn as sns
import numpy as np
sns.set()
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
df = pd.read_csv('autos.csv', parse_dates=['dateCreated', 'lastSeen'])
print(df.shape)
df.head()
%%time
pps_matrix = pps.matrix(df, verbose=True, n_jobs=-1)
ax, fig = plt.subplots(figsize=(10, 10))
sns.heatmap(
pps_matrix.pivot("y", "x", "ppscore"),
vmin=0,
vmax=1,
cmap="Blues",
linewidths=0.5
)
test_df = pd.DataFrame(np.arange(-50, 50), columns=["feature_0"])
test_df["feature_1"] = test_df["feature_0"] ** 2
test_df['feature_2'] = test_df['feature_0'] ** 3
nonl_df = pps.nonlinear_features(test_df, plot_nonlinear_features=True)
fraud_df = pd.read_csv("datasets/santander_value_pred/TRAIN.csv")
print(fraud_df.shape)
fraud_df.head()
predictors = pps.predictors(fraud_df, 'TARGET', verbose=True)
predictors.head()
predictors.ppscore.mean()
house_prices = pd.read_csv("datasets/house_prices/TRAIN.csv")
print(house_prices.shape)
house_prices.head()
{col:(house_prices[col].unique().shape[0], house_prices.dtypes[col]) for col in house_prices}
def cast_to_cat(df, border=20):
for col in df:
if df.dtypes[col].kind == 'i' and np.unique(df[col]).shape[0] <= 20:
df[col] = df[col].astype('category')
return df
house_prices = house_prices.pipe(cast_to_cat)
predictors = pps.predictors(house_prices, 'TARGET', verbose=True)
fig, ax = plt.subplots(figsize=(16, 8))
sns.barplot(data=predictors[predictors.ppscore.gt(0)], x="x", y="ppscore", ax=ax)
plt.xticks(rotation=60);
%%time
good_features = predictors[predictors.ppscore.gt(0)].x.tolist()[:20] + ["TARGET"]
print(len(good_features))
pps_matrix = pps.matrix(house_prices[good_features], n_jobs=-1)
fig, ax = plt.subplots(figsize=(14, 14))
sns.heatmap(
pps_matrix.pivot("y", "x", "ppscore"),
vmin=0,
vmax=1,
cmap="Blues",
linewidths=0.5,
ax=ax
)
sns.jointplot(
data=house_prices,
y='Foundation',
x='YearBuilt'
)
sns.jointplot(
data=house_prices,
x='YearBuilt',
y='Neighborhood',
height=12
)
nonl_df = pps.nonlinear_features(house_prices[good_features], plot_nonlinear_features=True, pos_border=0.4)
titanic_df = pd.read_csv("datasets/titanic/train.csv")
print(titanic_df.shape)
titanic_df.head()
cat_features = ['SibSp', 'Parch', 'Ticket', 'Cabin', 'Embarked', 'Pclass']
for col in cat_features:
titanic_df[col] = titanic_df[col].astype('category')
%%time
pps_matrix = pps.matrix(titanic_df, verbose=True, n_jobs=-1)
fig, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(
pps_matrix.pivot("y", "x", "ppscore"),
vmin=0,
vmax=1,
cmap="Blues",
linewidths=0.5,
ax=ax
)
sns.jointplot(
data=titanic_df.dropna(subset=['Embarked']),
y='Embarked',
x='Fare',
height=8
)
ax, fig = plt.subplots(figsize=(10, 10))
sns.heatmap(
titanic_df.corr(),
vmin=0,
vmax=1,
cmap="Blues",
linewidths=0.5
)
nonl_df = pps.nonlinear_features(titanic_df, plot_nonlinear_features=True, pos_border=0.1)
| 0.348756 | 0.797004 |
<a href="https://colab.research.google.com/github/sorayabailey23/Group-93/blob/main/Assessor_Data_Cleaning_2015.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Assessor Data Cleaning - 2015
---
```
#importing libraries + drive
from google.colab import drive
drive.mount('/content/gdrive')
import numpy as np
import pandas as pd
import math
```
##Uploading + Exploring the Data
I'll first upload the raw data and see what features it includes.
```
#Importing data from 2015 in Los Angeles
records_df = pd.read_csv("gdrive/My Drive/Team 93 Capstone Project/data/Assessor_Parcels_Data_2015.csv")
records_df.head()
records_df.columns
#verifying that we pulled data for the correct city + year
records_df["taxratearea_city"].unique()
records_df["rollyear"].unique()
records_df.info()
```
## Relevance
The dataset includes a lot of information that I don't need. For this section, I'll
1. Drop the columns that I don't need
2. Check the remaining columns for unique values
3. Handle missing values, including dropping data that does not include zip codes
###Determining which columns to drop
```
#dropping irrelevant columns
records_df = records_df.drop(columns=['Unnamed: 0', 'situszip','ain',
'taxratearea', 'usetype', 'usecode','usecodedescchar3', 'usecodedescchar4',
'totbuildingdatalines', 'yearbuilt', 'effectiveyearbuilt', 'sqftmain',
'bedrooms', 'bathrooms', 'units', 'recordingdate', 'roll_landvalue',
'roll_landbaseyear', 'roll_impvalue', 'roll_impbaseyear',
'roll_totlandimp', 'roll_homeownersexemp', 'roll_realestateexemp',
'roll_fixturevalue', 'roll_fixtureexemp', 'roll_perspropvalue',
'roll_perspropexemp', 'istaxableparcel', 'roll_totalvalue',
'roll_totalexemption',
'adminregion', 'cluster', 'parcelboundarydescription', 'situshouseno',
'situsfraction', 'situsdirection', 'situsstreet', 'situsunit',
'situscity', 'rowid', 'center_lat', 'center_lon', 'location_1','propertylocation', "parcelclassification"])
records_df.head()
#renaming columns to more recognizable names
records_df.columns=["city", "year", "assessor_id","general_use", "specific_use","net_value", "zip_code"]
```
###Handling Missing Values in ["assessor_id"] (none!)
```
len(records_df['assessor_id'].unique())
records_df['assessor_id'].isnull().sum()
#this series is good to go
```
### Handling Missing Values in ["general_use"]
```
#we have entries w/ nan and (unavailable) string
records_df["general_use"].unique()
#checking how many nans in general_use
records_df['general_use'].isnull().sum()
#replacing nan with (unavailable)
records_df["general_use"] = records_df["general_use"].fillna('(unavailable)')
records_df["general_use"].unique()
```
###Handling Missing Values in ["specific_use"]
```
#there is an empty string in this column that we should replace with nan or Not Available
records_df["specific_use"].unique()
records_df["specific_use"]= records_df["specific_use"].fillna('(unavailable)')
records_df["specific_use"].unique()
```
###Handling Missing Values in ["net_value"]
```
#this is good to go
records_df['net_value'].isnull().sum()
```
###Handling Missing Values in ["zip_code"]
```
#good to go
records_df['zip_code'].unique()
```
###Dropping Data w/o Zip Codes
```
records_df = records_df[records_df['zip_code'].notna()]
records_df = records_df.reset_index()
records_df = records_df.drop(columns=['index'])
```
##Consistency
Now that I've handled missing values, I will check consistency over:
1. Data type
2. Categorical consistency
3. Referential integrity
###Data type
```
records_df.info()
#changing to category type
records_df["assessor_id"] = records_df["assessor_id"].astype("category")
records_df["general_use"] = records_df["general_use"].astype("category")
records_df["specific_use"] = records_df["specific_use"].astype("category")
#since all NaN have been removed, change type of zip code column to int. this may change later
records_df["zip_code"] = records_df["zip_code"].astype("int")
#change year to datetime format
records_df["year"] = pd.to_datetime(records_df['year'], format='%Y')
records_df["year"] = records_df["year"].dt.year
records_df
records_df.info()
```
###Categorical Consistency
```
#consistent
records_df["general_use"].unique()
#consistent
records_df["specific_use"]
```
### Referential integrity
```
#making the assessor_id a unique ID per parcel per year
records_df['assessor_id'] = records_df["assessor_id"].apply(lambda x: x +"-2015")
records_df
```
##Exporting Data
```
records_df.to_csv("gdrive/My Drive/Team 93 Capstone Project/data/Assessor_Parcels_Data_2015_CLEANED.csv")
```
|
github_jupyter
|
#importing libraries + drive
from google.colab import drive
drive.mount('/content/gdrive')
import numpy as np
import pandas as pd
import math
#Importing data from 2015 in Los Angeles
records_df = pd.read_csv("gdrive/My Drive/Team 93 Capstone Project/data/Assessor_Parcels_Data_2015.csv")
records_df.head()
records_df.columns
#verifying that we pulled data for the correct city + year
records_df["taxratearea_city"].unique()
records_df["rollyear"].unique()
records_df.info()
#dropping irrelevant columns
records_df = records_df.drop(columns=['Unnamed: 0', 'situszip','ain',
'taxratearea', 'usetype', 'usecode','usecodedescchar3', 'usecodedescchar4',
'totbuildingdatalines', 'yearbuilt', 'effectiveyearbuilt', 'sqftmain',
'bedrooms', 'bathrooms', 'units', 'recordingdate', 'roll_landvalue',
'roll_landbaseyear', 'roll_impvalue', 'roll_impbaseyear',
'roll_totlandimp', 'roll_homeownersexemp', 'roll_realestateexemp',
'roll_fixturevalue', 'roll_fixtureexemp', 'roll_perspropvalue',
'roll_perspropexemp', 'istaxableparcel', 'roll_totalvalue',
'roll_totalexemption',
'adminregion', 'cluster', 'parcelboundarydescription', 'situshouseno',
'situsfraction', 'situsdirection', 'situsstreet', 'situsunit',
'situscity', 'rowid', 'center_lat', 'center_lon', 'location_1','propertylocation', "parcelclassification"])
records_df.head()
#renaming columns to more recognizable names
records_df.columns=["city", "year", "assessor_id","general_use", "specific_use","net_value", "zip_code"]
len(records_df['assessor_id'].unique())
records_df['assessor_id'].isnull().sum()
#this series is good to go
#we have entries w/ nan and (unavailable) string
records_df["general_use"].unique()
#checking how many nans in general_use
records_df['general_use'].isnull().sum()
#replacing nan with (unavailable)
records_df["general_use"] = records_df["general_use"].fillna('(unavailable)')
records_df["general_use"].unique()
#there is an empty string in this column that we should replace with nan or Not Available
records_df["specific_use"].unique()
records_df["specific_use"]= records_df["specific_use"].fillna('(unavailable)')
records_df["specific_use"].unique()
#this is good to go
records_df['net_value'].isnull().sum()
#good to go
records_df['zip_code'].unique()
records_df = records_df[records_df['zip_code'].notna()]
records_df = records_df.reset_index()
records_df = records_df.drop(columns=['index'])
records_df.info()
#changing to category type
records_df["assessor_id"] = records_df["assessor_id"].astype("category")
records_df["general_use"] = records_df["general_use"].astype("category")
records_df["specific_use"] = records_df["specific_use"].astype("category")
#since all NaN have been removed, change type of zip code column to int. this may change later
records_df["zip_code"] = records_df["zip_code"].astype("int")
#change year to datetime format
records_df["year"] = pd.to_datetime(records_df['year'], format='%Y')
records_df["year"] = records_df["year"].dt.year
records_df
records_df.info()
#consistent
records_df["general_use"].unique()
#consistent
records_df["specific_use"]
#making the assessor_id a unique ID per parcel per year
records_df['assessor_id'] = records_df["assessor_id"].apply(lambda x: x +"-2015")
records_df
records_df.to_csv("gdrive/My Drive/Team 93 Capstone Project/data/Assessor_Parcels_Data_2015_CLEANED.csv")
| 0.177775 | 0.890247 |
```
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
```
### First of all, we need to get the parameters of the system
Speaking more specifically, we should determine the following factors:
* RSSI measurement at 1m (TX power, quite often can be found in documentation)
- the measurement should be done for different sensors
- the measurement should be done for several sensors at the same time, to get the picture on how the signals influence each other
- the measurement should be done for different orientation of the laptop (as it is not known where the ble module is located)
Then we can process the received datasets in the following way
* determine the environmental factors:
- https://hal.archives-ouvertes.fr/hal-01387824/document
    - the path loss index is calculated only using RSSI measurements, as it affects both transmitter and receiver (it should be calculated for each specific beacon, as it can be affected even by the laptop's screen)
    - `n = (RSSI(d0) - RSSI(d)) / (10 * log10(d/d0))` (a quick numeric sanity check of this formula is shown right after this list)
* then we should come up with a formula that will be used to determine an estimated distance towards a specific beacon (the backbone of the whole system).
    - The best outcome possible: come up with some dynamic calibration of it, as it is impossible to reach sufficient accuracy with a general-case formula (RSSI is hugely affected by the propagation environment)
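A quick numeric sanity check of the path-loss-index formula, using assumed example readings (these numbers are illustrative, not measurements from this setup): -59 dBm at the 1 m benchmark and -65 dBm at 2 m should give an index close to the free-space value of 2.
```
import math

# assumed example readings, in dBm (illustrative only)
rssi_d0, rssi_d = -59, -65
d0, d = 1.0, 2.0   # metres
n = (rssi_d0 - rssi_d) / (10 * math.log10(d / d0))
print(round(n, 2))  # ~1.99
```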
```
def get_path_loss_index(tx_benchmark_power: int, RSSI: int,
benchmark_distance: float, RSSI_measured_distance: float):
'''
determines n - path loss index which depends on the
propagation environment
'''
    # the log-distance path loss model uses a base-10 logarithm, matching the 10**(...) in get_distance
    return (tx_benchmark_power - RSSI) / (10 * math.log10(RSSI_measured_distance/benchmark_distance))
def get_distance_no_calibration(RSSI: int, tx_power: int):
return 0.89976 * (RSSI/tx_power)**7.7095 + 0.111
def get_distance(RSSI: int, tx_power: int, n: float):
'''
Uses Log-distance path loss model
'''
return 10**((tx_power - RSSI) / (10 * n))
def gets_stats(path_to_file: str, device: str):
df = pd.read_csv(path_to_file, sep=";")
print(df.head())
sens_rssi = df[df["DEVICE"] == device]["RSSI"].values.tolist()
sns.displot(sens_rssi)
plt.show()
MEAN_POWER = np.array(sens_rssi).mean()
MODE_POWER = stats.mode(sens_rssi)[0][0]
print(f"{MEAN_POWER=}")
print(f"{MODE_POWER=}")
return MEAN_POWER
tx_benchmark_power = gets_stats("one_meter.csv", "SmartParking1")
```
One meter to the BLE beacon. Screen is oriented in the opposite direction to the BLE beacon (this is important, as one can see later on)
```
two_meters_RSSI = gets_stats("two_meters.csv", "SmartParking1")
```
The same setup, but the distance is two meters, in order to determine the PATH_LOSS_INDEX.
```
PATH_LOSS_INDEX = get_path_loss_index(tx_benchmark_power, two_meters_RSSI, 1, 2)
PATH_LOSS_INDEX = round(PATH_LOSS_INDEX, 0) # what if make it integer number
get_distance(two_meters_RSSI, tx_benchmark_power, 1)
one_eighty = gets_stats("one_eighty.csv", "SmartParking1")
get_distance(one_eighty, tx_benchmark_power, get_path_loss_index(tx_benchmark_power, one_eighty, 1, 1.8))
```
As one can see, the results are too far from what we want, so let's try a different kind of setup.
Moreover, in the previous distance estimation the screen was slightly rotated (there was a gap through which the direct signal could reach the receiver). Let's repeat the whole measurement with the screen rotated towards the sensor.
```
one = gets_stats("one_meter_to_screen.csv", "SmartParking1")
two = gets_stats("screen_to_sensor.csv", "SmartParking1")
PATH_LOSS = get_path_loss_index(one, two, 1, 2)
PATH_LOSS
get_distance(one_eighty, one, PATH_LOSS)
get_distance_no_calibration(one_eighty, one) # what if to use the
# general case formula, taken from Android Beacon library
```
## different sensor
```
gets_stats("dif_one.csv", "SmartParking2")
```
- we can see that both devices have the same RSSI at the distance of one meter.
|
github_jupyter
|
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
def get_path_loss_index(tx_benchmark_power: int, RSSI: int,
benchmark_distance: float, RSSI_measured_distance: float):
'''
determines n - path loss index which depends on the
propagation environment
'''
    # the log-distance path loss model uses a base-10 logarithm, matching the 10**(...) in get_distance
    return (tx_benchmark_power - RSSI) / (10 * math.log10(RSSI_measured_distance/benchmark_distance))
def get_distance_no_calibration(RSSI: int, tx_power: int):
return 0.89976 * (RSSI/tx_power)**7.7095 + 0.111
def get_distance(RSSI: int, tx_power: int, n: float):
'''
Uses Log-distance path loss model
'''
return 10**((tx_power - RSSI) / (10 * n))
def gets_stats(path_to_file: str, device: str):
df = pd.read_csv(path_to_file, sep=";")
print(df.head())
sens_rssi = df[df["DEVICE"] == device]["RSSI"].values.tolist()
sns.displot(sens_rssi)
plt.show()
MEAN_POWER = np.array(sens_rssi).mean()
MODE_POWER = stats.mode(sens_rssi)[0][0]
print(f"{MEAN_POWER=}")
print(f"{MODE_POWER=}")
return MEAN_POWER
tx_benchmark_power = gets_stats("one_meter.csv", "SmartParking1")
two_meters_RSSI = gets_stats("two_meters.csv", "SmartParking1")
PATH_LOSS_INDEX = get_path_loss_index(tx_benchmark_power, two_meters_RSSI, 1, 2)
PATH_LOSS_INDEX = round(PATH_LOSS_INDEX, 0) # what if make it integer number
get_distance(two_meters_RSSI, tx_benchmark_power, 1)
one_eighty = gets_stats("one_eighty.csv", "SmartParking1")
get_distance(one_eighty, tx_benchmark_power, get_path_loss_index(tx_benchmark_power, one_eighty, 1, 1.8))
one = gets_stats("one_meter_to_screen.csv", "SmartParking1")
two = gets_stats("screen_to_sensor.csv", "SmartParking1")
PATH_LOSS = get_path_loss_index(one, two, 1, 2)
PATH_LOSS
get_distance(one_eighty, one, PATH_LOSS)
get_distance_no_calibration(one_eighty, one) # what if to use the
# general case formula, taken from Android Beacon library
gets_stats("dif_one.csv", "SmartParking2")
| 0.624523 | 0.900135 |
**Important: This notebook will only work with fastai-0.7.x. Do not try to run any fastai-1.x code from this path in the repository because it will load fastai-0.7.x**
## Dogs v Cats super-charged!
```
# Put these at the top of every notebook, to get automatic reloading and inline plotting
%reload_ext autoreload
%autoreload 2
%matplotlib inline
# This file contains all the main external libs we'll use
from fastai.imports import *
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
PATH = "data/dogscats/"
sz=299
arch=resnext50
bs=28
tfms = tfms_from_model(arch, sz, aug_tfms=transforms_side_on, max_zoom=1.1)
data = ImageClassifierData.from_paths(PATH, tfms=tfms, bs=bs, num_workers=4)
learn = ConvLearner.pretrained(arch, data, precompute=True, ps=0.5)
learn.fit(1e-2, 1)
learn.precompute=False
learn.fit(1e-2, 2, cycle_len=1)
learn.unfreeze()
lr=np.array([1e-4,1e-3,1e-2])
learn.fit(lr, 3, cycle_len=1)
learn.save('224_all_50')
learn.load('224_all_50')
log_preds,y = learn.TTA()
probs = np.mean(np.exp(log_preds),0)
accuracy_np(probs,y)
```
## Analyzing results
```
preds = np.argmax(probs, axis=1)
probs = probs[:,1]
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y, preds)
plot_confusion_matrix(cm, data.classes)
def rand_by_mask(mask): return np.random.choice(np.where(mask)[0], 4, replace=False)
def rand_by_correct(is_correct): return rand_by_mask((preds == data.val_y)==is_correct)
def plot_val_with_title(idxs, title):
imgs = np.stack([data.val_ds[x][0] for x in idxs])
title_probs = [probs[x] for x in idxs]
print(title)
return plots(data.val_ds.denorm(imgs), rows=1, titles=title_probs)
def plots(ims, figsize=(12,6), rows=1, titles=None):
f = plt.figure(figsize=figsize)
for i in range(len(ims)):
sp = f.add_subplot(rows, len(ims)//rows, i+1)
sp.axis('Off')
if titles is not None: sp.set_title(titles[i], fontsize=16)
plt.imshow(ims[i])
def load_img_id(ds, idx): return np.array(PIL.Image.open(PATH+ds.fnames[idx]))
def plot_val_with_title(idxs, title):
imgs = [load_img_id(data.val_ds,x) for x in idxs]
title_probs = [probs[x] for x in idxs]
print(title)
return plots(imgs, rows=1, titles=title_probs, figsize=(16,8))
def most_by_mask(mask, mult):
idxs = np.where(mask)[0]
return idxs[np.argsort(mult * probs[idxs])[:4]]
def most_by_correct(y, is_correct):
mult = -1 if (y==1)==is_correct else 1
return most_by_mask((preds == data.val_y)==is_correct & (data.val_y == y), mult)
plot_val_with_title(most_by_correct(0, False), "Most incorrect cats")
plot_val_with_title(most_by_correct(1, False), "Most incorrect dogs")
```
|
github_jupyter
|
# Put these at the top of every notebook, to get automatic reloading and inline plotting
%reload_ext autoreload
%autoreload 2
%matplotlib inline
# This file contains all the main external libs we'll use
from fastai.imports import *
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
PATH = "data/dogscats/"
sz=299
arch=resnext50
bs=28
tfms = tfms_from_model(arch, sz, aug_tfms=transforms_side_on, max_zoom=1.1)
data = ImageClassifierData.from_paths(PATH, tfms=tfms, bs=bs, num_workers=4)
learn = ConvLearner.pretrained(arch, data, precompute=True, ps=0.5)
learn.fit(1e-2, 1)
learn.precompute=False
learn.fit(1e-2, 2, cycle_len=1)
learn.unfreeze()
lr=np.array([1e-4,1e-3,1e-2])
learn.fit(lr, 3, cycle_len=1)
learn.save('224_all_50')
learn.load('224_all_50')
log_preds,y = learn.TTA()
probs = np.mean(np.exp(log_preds),0)
accuracy_np(probs,y)
preds = np.argmax(probs, axis=1)
probs = probs[:,1]
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y, preds)
plot_confusion_matrix(cm, data.classes)
def rand_by_mask(mask): return np.random.choice(np.where(mask)[0], 4, replace=False)
def rand_by_correct(is_correct): return rand_by_mask((preds == data.val_y)==is_correct)
def plot_val_with_title(idxs, title):
imgs = np.stack([data.val_ds[x][0] for x in idxs])
title_probs = [probs[x] for x in idxs]
print(title)
return plots(data.val_ds.denorm(imgs), rows=1, titles=title_probs)
def plots(ims, figsize=(12,6), rows=1, titles=None):
f = plt.figure(figsize=figsize)
for i in range(len(ims)):
sp = f.add_subplot(rows, len(ims)//rows, i+1)
sp.axis('Off')
if titles is not None: sp.set_title(titles[i], fontsize=16)
plt.imshow(ims[i])
def load_img_id(ds, idx): return np.array(PIL.Image.open(PATH+ds.fnames[idx]))
def plot_val_with_title(idxs, title):
imgs = [load_img_id(data.val_ds,x) for x in idxs]
title_probs = [probs[x] for x in idxs]
print(title)
return plots(imgs, rows=1, titles=title_probs, figsize=(16,8))
def most_by_mask(mask, mult):
idxs = np.where(mask)[0]
return idxs[np.argsort(mult * probs[idxs])[:4]]
def most_by_correct(y, is_correct):
mult = -1 if (y==1)==is_correct else 1
return most_by_mask((preds == data.val_y)==is_correct & (data.val_y == y), mult)
plot_val_with_title(most_by_correct(0, False), "Most incorrect cats")
plot_val_with_title(most_by_correct(1, False), "Most incorrect dogs")
| 0.629091 | 0.745653 |
```
import warnings
warnings.filterwarnings("ignore")
import pandas as pd #pandas library
import numpy as np
import nltk
import string
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from nltk.stem.porter import PorterStemmer
import re
# Tutorial about Python regular expressions: https://pymotw.com/2/re/
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
import pickle
from tqdm import tqdm
import os
data=pd.read_csv('/home/rahul/Downloads/Archive/Reviews.csv')
data
data=data[data['Score']!=3]
def partition(x):
if x < 3:
return 0
return 1
score_upd = data['Score']
temp = score_upd.map(partition)
data['Score'] = temp
data_f=data.sort_values('ProductId').drop_duplicates(subset=['UserId','ProfileName','Time','Summary','Text'],keep='first',inplace=False)
Final_Values=data_f[data_f['HelpfulnessDenominator']>=data_f['HelpfulnessNumerator']]
Final_Values
stop=set(stopwords.words('english'))
snow = nltk.stem.SnowballStemmer('english')
stop
def decontracted(phrase):
phrase=re.sub(r"won't","will not",phrase)
phrase=re.sub(r"can't","can not",phrase)
phrase=re.sub(r"n\'t","not",phrase)
phrase=re.sub(r"\'re","are",phrase)
phrase=re.sub(r"\'s","is",phrase)
phrase=re.sub(r"\'d","would",phrase)
phrase=re.sub(r"\'ll","will",phrase)
phrase=re.sub(r"\'t","not",sentence)
phrase=re.sub(r"\'ve","have",sentence)
phrase=re.sub(r"\'m","am",sentence)
return phrase
preprocessed_reviews=[]
for sentence in Final_Values['Text'].values:
sentence=re.sub(r"http\S+"," ",sentence)
sentence=BeautifulSoup(sentence,'lxml').get_text()
cleanr=re.compile('<.*?>')
sentence=re.sub(cleanr,' ',sentence)
sentence=decontracted(sentence)
sentence=re.sub("\S\*\d\S*"," ",sentence)
sentence=re.sub("[^A-Za-z]+"," ",sentence)
sentence=re.sub(r'[?|!|\'|"|#]',r' ',sentence)
sentence=re.sub(r'[.|,|)|(|\|/]',r' ',sentence)
sentence=' '.join(snow.stem(e.lower()) for e in sentence.split() if e.lower() not in stop)
preprocessed_reviews.append(sentence.strip())
Final_Values['preprocessed_reviews']=preprocessed_reviews #adding cleaned text to the
Final_Values['preprocessed_reviews']=Final_Values['preprocessed_reviews']
Final_Values['preprocessed_reviews'].iloc[0]
Final_Values.to_csv('preprocessed_reviews.csv',index=False)
```
|
github_jupyter
|
import warnings
warnings.filterwarnings("ignore")
import pandas as pd #pandas library
import numpy as np
import nltk
import string
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from nltk.stem.porter import PorterStemmer
import re
# Tutorial about Python regular expressions: https://pymotw.com/2/re/
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
import pickle
from tqdm import tqdm
import os
data=pd.read_csv('/home/rahul/Downloads/Archive/Reviews.csv')
data
data=data[data['Score']!=3]
def partition(x):
if x < 3:
return 0
return 1
score_upd = data['Score']
temp = score_upd.map(partition)
data['Score'] = temp
data_f=data.sort_values('ProductId').drop_duplicates(subset=['UserId','ProfileName','Time','Summary','Text'],keep='first',inplace=False)
Final_Values=data_f[data_f['HelpfulnessDenominator']>=data_f['HelpfulnessNumerator']]
Final_Values
stop=set(stopwords.words('english'))
snow = nltk.stem.SnowballStemmer('english')
stop
def decontracted(phrase):
phrase=re.sub(r"won't","will not",phrase)
phrase=re.sub(r"can't","can not",phrase)
phrase=re.sub(r"n\'t","not",phrase)
phrase=re.sub(r"\'re","are",phrase)
phrase=re.sub(r"\'s","is",phrase)
phrase=re.sub(r"\'d","would",phrase)
phrase=re.sub(r"\'ll","will",phrase)
phrase=re.sub(r"\'t","not",sentence)
phrase=re.sub(r"\'ve","have",sentence)
phrase=re.sub(r"\'m","am",sentence)
return phrase
preprocessed_reviews=[]
for sentence in Final_Values['Text'].values:
sentence=re.sub(r"http\S+"," ",sentence)
sentence=BeautifulSoup(sentence,'lxml').get_text()
cleanr=re.compile('<.*?>')
sentence=re.sub(cleanr,' ',sentence)
sentence=decontracted(sentence)
sentence=re.sub("\S\*\d\S*"," ",sentence)
sentence=re.sub("[^A-Za-z]+"," ",sentence)
sentence=re.sub(r'[?|!|\'|"|#]',r' ',sentence)
sentence=re.sub(r'[.|,|)|(|\|/]',r' ',sentence)
sentence=' '.join(snow.stem(e.lower()) for e in sentence.split() if e.lower() not in stop)
preprocessed_reviews.append(sentence.strip())
Final_Values['preprocessed_reviews']=preprocessed_reviews #adding cleaned text to the
Final_Values['preprocessed_reviews']=Final_Values['preprocessed_reviews']
Final_Values['preprocessed_reviews'].iloc[0]
Final_Values.to_csv('preprocessed_reviews.csv',index=False)
| 0.308919 | 0.217919 |
<img align="left" src="https://lever-client-logos.s3.amazonaws.com/864372b1-534c-480e-acd5-9711f850815c-1524247202159.png" width=200>
<br></br>
<br></br>
## *Data Science Unit 4 Sprint 3 Assignment 1*
# Recurrent Neural Networks and Long Short Term Memory (LSTM)
It is said that [infinite monkeys typing for an infinite amount of time](https://en.wikipedia.org/wiki/Infinite_monkey_theorem) will eventually type, among other things, the complete works of William Shakespeare. Let's see if we can get there a bit faster, with the power of Recurrent Neural Networks and LSTM.
This text file contains the complete works of Shakespeare: https://www.gutenberg.org/files/100/100-0.txt
Use it as training data for an RNN - you can keep it simple and train character level, and that is suggested as an initial approach.
Then, use that trained RNN to generate Shakespearean-ish text. Your goal - a function that can take, as an argument, the size of text (e.g. number of characters or lines) to generate, and returns generated text of that size.
Note - Shakespeare wrote an awful lot. It's OK, especially initially, to sample/use smaller data and parameters, so you can have a tighter feedback loop when you're trying to get things running. Then, once you've got a proof of concept - start pushing it more!
```
from bs4 import BeautifulSoup
import requests
import re
url = 'https://www.gutenberg.org/files/100/100-0.txt'
response = requests.get(url)
shake = BeautifulSoup(response.text, 'html.parser')
shake = re.sub('[^A-Za-z0-9]+', ' ', shake.text)
sonnets = shake[2018:93446]
chars = list(set(sonnets))
# Based on "The Unreasonable Effectiveness of RNN" implementation
import numpy as np
chars = list(set(sonnets)) # split and remove duplicate characters. convert to list.
num_chars = len(chars) # the number of unique characters
txt_data_size = len(sonnets)
print("unique characters : ", num_chars)
print("txt_data_size : ", sonnets)
# one hot encode
char_to_int = dict((c, i) for i, c in enumerate(chars)) # "enumerate" retruns index and value. Convert it to dictionary
int_to_char = dict((i, c) for i, c in enumerate(chars))
print(char_to_int)
print("----------------------------------------------------")
print(int_to_char)
print("----------------------------------------------------")
# integer encode input data
integer_encoded = [char_to_int[i] for i in sonnets] # "integer_encoded" is a list which has a sequence converted from an original data to integers.
print(integer_encoded)
print("----------------------------------------------------")
print("data length : ", len(integer_encoded))
# hyperparameters
iteration = 40
sequence_length = 40
batch_size = round((txt_data_size /sequence_length)+0.5) # = math.ceil
hidden_size = 200 # size of hidden layer of neurons.
learning_rate = 1e-1
# model parameters
W_xh = np.random.randn(hidden_size, num_chars)*0.01 # weight input -> hidden.
W_hh = np.random.randn(hidden_size, hidden_size)*0.01 # weight hidden -> hidden
W_hy = np.random.randn(num_chars, hidden_size)*0.01 # weight hidden -> output
b_h = np.zeros((hidden_size, 1)) # hidden bias
b_y = np.zeros((num_chars, 1)) # output bias
h_prev = np.zeros((hidden_size,1)) # h_(t-1)
def forwardprop(inputs, targets, h_prev):
# Since the RNN receives the sequence, the weights are not updated during one sequence.
xs, hs, ys, ps = {}, {}, {}, {} # dictionary
hs[-1] = np.copy(h_prev) # Copy previous hidden state vector to -1 key value.
loss = 0 # loss initialization
for t in range(len(inputs)): # t is a "time step" and is used as a key(dic).
xs[t] = np.zeros((num_chars,1))
xs[t][inputs[t]] = 1
hs[t] = np.tanh(np.dot(W_xh, xs[t]) + np.dot(W_hh, hs[t-1]) + b_h) # hidden state.
ys[t] = np.dot(W_hy, hs[t]) + b_y # unnormalized log probabilities for next chars
ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars.
# Softmax. -> The sum of probabilities is 1 even without the exp() function, but all of the elements are positive through the exp() function.
loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss). Efficient and simple code
# y_class = np.zeros((num_chars, 1))
# y_class[targets[t]] =1
# loss += np.sum(y_class*(-np.log(ps[t]))) # softmax (cross-entropy loss)
return loss, ps, hs, xs
def backprop(ps, inputs, hs, xs, targets):
dWxh, dWhh, dWhy = np.zeros_like(W_xh), np.zeros_like(W_hh), np.zeros_like(W_hy) # make all zero matrices.
dbh, dby = np.zeros_like(b_h), np.zeros_like(b_y)
dhnext = np.zeros_like(hs[0]) # (hidden_size,1)
# reversed
for t in reversed(range(len(inputs))):
dy = np.copy(ps[t]) # shape (num_chars,1). "dy" means "dloss/dy"
dy[targets[t]] -= 1 # backprop into y. After taking the soft max in the input vector, subtract 1 from the value of the element corresponding to the correct label.
dWhy += np.dot(dy, hs[t].T)
dby += dy
dh = np.dot(W_hy.T, dy) + dhnext # backprop into h.
dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity #tanh'(x) = 1-tanh^2(x)
dbh += dhraw
dWxh += np.dot(dhraw, xs[t].T)
dWhh += np.dot(dhraw, hs[t-1].T)
dhnext = np.dot(W_hh.T, dhraw)
for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients.
return dWxh, dWhh, dWhy, dbh, dby
%%time
data_pointer = 0
# memory variables for Adagrad
mWxh, mWhh, mWhy = np.zeros_like(W_xh), np.zeros_like(W_hh), np.zeros_like(W_hy)
mbh, mby = np.zeros_like(b_h), np.zeros_like(b_y)
for i in range(iteration):
h_prev = np.zeros((hidden_size,1)) # reset RNN memory
data_pointer = 0 # go from start of data
for b in range(batch_size):
inputs = [char_to_int[ch] for ch in sonnets[data_pointer:data_pointer+sequence_length]]
targets = [char_to_int[ch] for ch in sonnets[data_pointer+1:data_pointer+sequence_length+1]] # t+1
if (data_pointer+sequence_length+1 >= len(sonnets) and b == batch_size-1): # processing of the last part of the input data.
# targets.append(char_to_int[txt_data[0]]) # When the data doesn't fit, add the first char to the back.
targets.append(char_to_int[" "]) # When the data doesn't fit, add space(" ") to the back.
# forward
loss, ps, hs, xs = forwardprop(inputs, targets, h_prev)
# print(loss)
# backward
dWxh, dWhh, dWhy, dbh, dby = backprop(ps, inputs, hs, xs, targets)
# perform parameter update with Adagrad
for param, dparam, mem in zip([W_xh, W_hh, W_hy, b_h, b_y],
[dWxh, dWhh, dWhy, dbh, dby],
[mWxh, mWhh, mWhy, mbh, mby]):
mem += dparam * dparam # elementwise
param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
data_pointer += sequence_length # move data pointer
if i % 2 == 0:
print ('iter %d, loss: %f' % (i, loss)) # print progress
def predict(test_char, length):
x = np.zeros((num_chars, 1))
x[char_to_int[test_char]] = 1
ixes = []
h = np.zeros((hidden_size,1))
for t in range(length):
h = np.tanh(np.dot(W_xh, x) + np.dot(W_hh, h) + b_h)
y = np.dot(W_hy, h) + b_y
p = np.exp(y) / np.sum(np.exp(y))
ix = np.random.choice(range(num_chars), p=p.ravel()) # ravel -> rank0
# "ix" is a list of indexes selected according to the soft max probability.
x = np.zeros((num_chars, 1)) # init
x[ix] = 1
ixes.append(ix) # list
txt = test_char + ''.join(int_to_char[i] for i in ixes)
print ('%s' % (txt, ))
predict('T', 2000)
sonnets
```
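The stated goal is a function that *returns* generated text of a requested size, while `predict` above prints it. Below is a minimal sketch of such a wrapper; it assumes the trained parameters (`W_xh`, `W_hh`, `W_hy`, `b_h`, `b_y`) and the `char_to_int`/`int_to_char` maps from the cell above, and the name `generate_text` is my own.
```
def generate_text(seed_char, length):
    """Return the seed character followed by `length` characters sampled from the trained RNN."""
    x = np.zeros((num_chars, 1))
    x[char_to_int[seed_char]] = 1
    h = np.zeros((hidden_size, 1))
    out = [seed_char]
    for _ in range(length):
        h = np.tanh(np.dot(W_xh, x) + np.dot(W_hh, h) + b_h)   # advance the hidden state
        y = np.dot(W_hy, h) + b_y
        p = np.exp(y) / np.sum(np.exp(y))                      # softmax over the vocabulary
        ix = np.random.choice(range(num_chars), p=p.ravel())   # sample the next character
        x = np.zeros((num_chars, 1))
        x[ix] = 1
        out.append(int_to_char[ix])
    return ''.join(out)

# sample = generate_text('T', 400)
```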
# Resources and Stretch Goals
## Stretch goals:
- Refine the training and generation of text to be able to ask for different genres/styles of Shakespearean text (e.g. plays versus sonnets)
- Train a classification model that takes text and returns which work of Shakespeare it is most likely to be from
- Make it more performant! Many possible routes here - lean on Keras, optimize the code, and/or use more resources (AWS, etc.)
- Revisit the news example from class, and improve it - use categories or tags to refine the model/generation, or train a news classifier
- Run on bigger, better data
## Resources:
- [The Unreasonable Effectiveness of Recurrent Neural Networks](https://karpathy.github.io/2015/05/21/rnn-effectiveness/) - a seminal writeup demonstrating a simple but effective character-level NLP RNN
- [Simple NumPy implementation of RNN](https://github.com/JY-Yoon/RNN-Implementation-using-NumPy/blob/master/RNN%20Implementation%20using%20NumPy.ipynb) - Python 3 version of the code from "Unreasonable Effectiveness"
- [TensorFlow RNN Tutorial](https://github.com/tensorflow/models/tree/master/tutorials/rnn) - code for training a RNN on the Penn Tree Bank language dataset
- [4 part tutorial on RNN](http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/) - relates RNN to the vanishing gradient problem, and provides example implementation
- [RNN training tips and tricks](https://github.com/karpathy/char-rnn#tips-and-tricks) - some rules of thumb for parameterizing and training your RNN
<a href="https://colab.research.google.com/github/icarogoggin/BlueEdtech/blob/main/Aula04_Exercicios_complementares.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
1. Write a program that shows the message "Olá Mundo" ("Hello World") on the screen.
```
print ('Olá Mundo')
```
2. Write a program that asks for a number and then shows the message: the number entered was [number].
```
n1 = int(input('Digite um número: '))
print (f'O número informado foi {n1}.')
```
3. Write a program that asks for two numbers and prints their sum.
```
n1 = int(input('Digite um número: '))
n2 = int(input('Digite um número: '))
print ('O resultado desta soma é:', n1 + n2)
```
4. Write a program that asks for the four bimonthly grades and shows the average.
```
nota1 = float(input('Digite a primeira nota: '))
nota2 = float(input('Digite a segunda nota: '))
nota3 = float(input('Digite a terceira nota: '))
nota4 = float(input('Digite a quarta nota: '))
media = float((nota1 + nota2 + nota3 + nota4)/ 4)
print ('A média do aluno foi: ', media)
```
5. Write a program that converts meters to centimeters.
```
m = float(input('Coloque o tamanho em (m) que você quer converter para (cm): '))
cm = m * 100
print (f'{m} metro(s) são {cm} centímetros')
```
6. Write a program that asks for the radius of a circle, then calculates and shows its area.
```
raio = float(input('Digite aqui o raio: '))
area = raio ** 2 *3.14
print (f'Crincunferências de raio {raio} a área calculada é: {area}')
```
7. Write a program that calculates the area of a square and then shows the user twice that area.
```
lquadrado = float(input('Digite um dos lados do quadrado: '))
area = lquadrado * lquadrado
print (f'A área calculada é: {area} \nO Dobro da área é:', area * 2)
```
8. Write a program that asks how much you earn per hour and the number of hours worked in the month.<br> Calculate and show your total salary for that month.
```
vhora = float(input('Quanto você ganhará por hora: '))
thora = float(input('Quantas horas você trabalhará no mês: '))
print ('Seu salário mensal será de R$', vhora * thora)
```
9. Write a program that asks for the temperature in degrees Fahrenheit, converts it, and shows the temperature in degrees Celsius.<br>
C = 5 * ((F-32) / 9).
```
ftemp = float(input('Qual temperatura em fahrenheit: '))
ctemp = "%.2f" %(ftemp - 32 *5/9)
print (f'{ftemp}°F é equivalente a {ctemp}°C')
```
10. Write a program that asks for the temperature in degrees Celsius, converts it, and shows it in degrees Fahrenheit.
```
ctemp = float(input('Qual temperatura em Celsius: '))
ftemp = "%.2f" %(ctemp *5/9 + 32)
print (f'{ctemp}°C é equivalente a {ftemp}°F')
```
11. Write a program that asks for 2 integers and one real number. Calculate and show:<br>
a. the product of twice the first number and half of the second. <br>
b. the sum of three times the first number and the third. <br>
c. the third number cubed.
```
n1 = int(input('Digite o numero 1: '))
n2 = int(input('Digite o numero 2: '))
real = float(input('Digite o numero 3 (Real): '))
r1 = n1 * 2 * (n2 / 2)
r2 = n1 * 3 + real
r3 = real ** 3
print (f'''
O produto do dobro do primeiro numero com metade do segundo é: {r1}
A soma do triplo do primeiro numero com o terceiro é: {r2}
O terceiro numero elevado ao cubo é: {r3}''')
```
12. Taking a person's height as input, build an algorithm that calculates their ideal weight, using the following formula:<br> (72.7*height) - 58
```
alt = (input('Qual sua altura?'))
renamed_alt = float(alt.replace ('.', ''))
altura = renamed_alt/100
pesoideal = (72.7*altura)-58
print (f'Seu peso ideal é {pesoideal:.2f} kg')
```
13. Taking a person's height (h) as input, build an algorithm that calculates their ideal weight, using the following formulas:<br>
a. For men: (72.7*h) - 58 <br>
b. For women: (62.1*h) - 44.7
```
alt = (input('Qual sua altura?'))
renamed_alt = float(alt.replace ('.', ''))
altura = renamed_alt/100
pesoidealH = (72.7*altura)-58
pesoidealF = (62.1*altura)-44.7
print (f'Seu peso ideal é {pesoidealH:.2f} kg, caso você seja homem.')
print (f'Seu peso ideal é {pesoidealF:.2f} kg, caso você seja mulher.')
#later I intend to use an if so the user can choose their sex and see only the weight that applies to them
```
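The comment above mentions adding an `if` so the user can choose their sex and see only the applicable weight. A minimal sketch of that idea (prompts and variable names here are illustrative, and the height is read directly as a number such as 1.75):
```
alt = input('What is your height? ')
altura = float(alt.replace(',', '.'))     # accept either decimal separator, e.g. 1.75 or 1,75
sexo = input('Sex (M/F)? ').strip().upper()
if sexo == 'M':
    print(f'Your ideal weight is {(72.7 * altura) - 58:.2f} kg')
else:
    print(f'Your ideal weight is {(62.1 * altura) - 44.7:.2f} kg')
```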
14. João Papo-de-Pescador, an upstanding man, bought a microcomputer to track the daily earnings from his work. Whenever he brings in a catch of fish heavier than the limit set by the fishing regulations of the state of São Paulo (50 kilos), he must pay a fine of R$ 4.00 per excess kilo. João needs you to write a program that reads the variable peso (weight of fish) and calculates the excess. Store in the variable excesso the number of kilos over the limit and in the variable multa the fine João must pay. Print the program's data with appropriate messages.
```
peso_total = float(input('Quantos Kg de peixe você pescou? '))
peso_excesso = peso_total-50
multa = peso_excesso * 4
if peso_excesso >= 0 :
print (f'João, você pescou {peso_total}Kg de peixes! excedendo o regulamento em {peso_excesso}Kg.\nVocê precisará pagar uma multa de R${multa}!')
else:
print ('Você pescou dentro da regulamentação, não pagará nenhuma multa!')
```
15. Write a program that asks how much you earn per hour and the number of hours worked in the month. Calculate and show your total salary for that month, knowing that 11% is deducted for income tax (IR), 8% for INSS, and 5% for the union; the program should give us:<br>
- the gross salary.<br>
- how much was paid to INSS.<br>
- how much was paid to the union.<br>
- the net salary.<br>
<p>Calculate the deductions and the net salary, according to the table below:</p>
(+) Gross salary : <br>
(-) IR (11%) :<br>
(-) INSS (8%) :<br>
(-) Union (5%) :<br>
(=) Net salary :<br>
<br>
Note: Gross salary - Deductions = Net salary. <br>
```
vhora = float(input('Quanto você ganha por hora: '))
thora = float(input('Quantas horas você trabalha por mês: '))
salario = vhora * thora
ir = salario * 0.11
inss = salario * 0.08
sindicato = salario * 0.05
salarioL = salario - ir - inss - sindicato
print (f'''Seu salário bruto é: R$ {salario}
O custo do seu IR é: R$ {ir}
O custo do seu INSS é: R$ {inss}
O custo do sindicato é: R$ {sindicato}
O seu salário liquido será de: R$ {salarioL}''')
```
16. Write a program for a paint store. The program should ask for the size, in square meters, of the area to be painted. Assume that the paint covers 3 square meters per liter and that it is sold in 18-liter cans costing R$ 80.00. Tell the user how many cans of paint to buy and the total price.
```
```
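No solution was filled in for exercise 16; here is one possible sketch, using the coverage of 3 m² per liter and the R$ 80.00 price per 18-liter can stated above (variable names are my own):
```
import math

area = float(input('Area to be painted (m²): '))
liters = area / 3                       # coverage: 1 liter per 3 square meters
cans = math.ceil(liters / 18)           # 18-liter cans, always rounding up
print(f'Buy {cans} can(s) of 18 L. Total price: R$ {cans * 80.00:.2f}')
```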
17. Write a program for a paint store. The program should ask for the size, in square meters, of the area to be painted. Assume that the paint covers 6 square meters per liter and that it is sold in 18-liter cans costing R$ 80.00 or in 3.6-liter gallons costing R$ 25.00.<br>
Tell the user the amounts of paint to buy and the respective prices in 3 situations:<br>
- buy only 18-liter cans;<br>
- buy only 3.6-liter gallons;<br>
- mix cans and gallons, so that the paint waste is minimized.<br>
<br>
Add a 10% margin and always round the values up, that is, consider full cans.
```
```
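Exercise 17 was also left unanswered; a sketch of the three purchase scenarios, applying the 10% margin and rounding up as the statement asks. The mixing rule used here (whole cans first, gallons for the remainder) is one simple interpretation of "less waste", not the only possible one:
```
import math

area = float(input('Area to be painted (m²): '))
liters = (area / 6) * 1.10              # 1 liter per 6 m², plus a 10% margin

cans_only = math.ceil(liters / 18)
gallons_only = math.ceil(liters / 3.6)

cans_mix = int(liters // 18)            # whole 18 L cans cover most of the volume
gallons_mix = math.ceil((liters - cans_mix * 18) / 3.6)

print(f'Only 18 L cans   : {cans_only} -> R$ {cans_only * 80:.2f}')
print(f'Only 3.6 L gallons: {gallons_only} -> R$ {gallons_only * 25:.2f}')
print(f'Mix: {cans_mix} can(s) + {gallons_mix} gallon(s) -> R$ {cans_mix * 80 + gallons_mix * 25:.2f}')
```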
18. Write a program that asks for the size of a file to be downloaded (in MB) and the speed of an Internet link (in Mbps), then calculates and reports the approximate download time for the file over that link (in minutes).
```
```
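A sketch for exercise 18, converting megabytes to megabits (1 MB = 8 megabits) before dividing by the link speed:
```
size_mb = float(input('File size (MB): '))
speed_mbps = float(input('Link speed (Mbps): '))

seconds = (size_mb * 8) / speed_mbps    # MB -> megabits, then divide by the transfer rate
print(f'Approximate download time: {seconds / 60:.2f} minutes')
```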
# Anaconda & Jupyter Notebooks
## Anaconda/miniconda Commands
[See class here](https://classroom.udacity.com/courses/ud1111)
[Cheat sheet](https://docs.conda.io/projects/conda/en/latest/user-guide/cheatsheet.html)
### Update
`conda update conda`
`conda update --all`
### Packages
`conda list`
`conda install package_name`
`conda remove package_name`
`conda search *search_term*`
### Environment
`conda env list`
`conda create -n env_name list_of_packages`
`conda env remove -n env_name`
`conda activate env_name`
`conda deactivate`
Export environment
`conda env export > environment.yaml`
`conda env create -f environment.yaml`
__Good Practice__: Make an environment file and include it in the github repository.
_Also include a pip requirements.txt file using pip freeze for people not using conda._
### pip
pip freeze [options]
`pip freeze > requirements.txt`
`pip install -r requirements.txt`
[Documentation](https://pip.pypa.io/en/stable/reference/pip_freeze/)
## Jupyter Notebooks
**Installation:**
`conda install jupyter notebook`
`conda install nb_conda`
### Markdown
[Github Guide](https://guides.github.com/features/mastering-markdown/)
[Cheat sheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)
### Magic Keywords
[List of magic keywords](https://ipython.readthedocs.io/en/stable/interactive/magics.html)
Magic keywords are special commands you can run in cells that let you control the notebook itself or perform system calls such as changing directories. For example, you can set up matplotlib to work interactively in the notebook with `%matplotlib`.
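For example, a few commonly used line magics (these run in a notebook or IPython session, not in a plain Python script):
```
%pwd      # print the current working directory
%cd ..    # change the working directory
%who      # list the names defined in the current session
```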
#### **Timing**
Time a function: `%timeit function_name`
Time a whole cell: put `%%timeit` at the first line of code
```
# Time a function example
from math import sqrt
def fibo1(n): # Recursive Fibonacci Number
if n == 0:
return 0
elif n == 1:
return 1
return fibo1(n-1) + fibo1(n-2)
def fibo2(n): # Closed Form
return ((1+sqrt(5))**n - (1-sqrt(5))**n)/(2**n*sqrt(5))
%timeit fibo1(20)
%timeit fibo2(20)
# Time a whole cell example
import random
%%timeit
prize = 0
for ii in range(100):
# roll a dice
roll = random.randint(1, 6)
if roll%2 == 0:
prize += roll
else:
prize -= 1
%%timeit
rolls = (random.randint(1, 6) for _ in range(100))
prize = sum(roll if roll%2 == 0 else -1 for roll in rolls)
```
#### **Embedding visualizations**
By default figures will render in their own window. However, you can pass arguments to the command to select a specific "backend", the software that renders the image. To render figures directly in the notebook, you should use the inline backend with the command `%matplotlib inline`
_**Tip**: On higher resolution screens such as Retina displays, the default images in notebooks can look blurry. Use_
```
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
```
_to render higher resolution images._
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 1, 300)
for w in range(2, 6, 2):
plt.plot(x, np.sin(np.pi*x) * np.sin(2*w*np.pi*x))
```
### Debugging
`%pdb`
[Documentation](https://docs.python.org/3/library/pdb.html)
```
%pdb
numbers = "hello"
sum(numbers)
```
To **quit** the debugger, simply enter `q` in the prompt.
### Converting notebooks
Notebooks are just big [**JSON**](http://www.json.org/) files with the extension `.ipynb`
Jupyter comes with a utility called `nbconvert` for converting to HTML, Markdown, slideshows, etc.
For example, to convert a notebook to an HTML file, in your terminal use
`jupyter nbconvert my_notebook.ipynb --to html --output output.html`
_Converting to HTML is useful for sharing your notebooks with others who aren't using notebooks. Markdown is great for including a notebook in blogs and other text editors that accept Markdown formatting._
[Documentation](https://nbconvert.readthedocs.io/en/latest/usage.html)
### Creating a slideshow
An [example](https://nbviewer.jupyter.org/format/slides/github/jorisvandenbossche/2015-PyDataParis/blob/master/pandas_introduction.ipynb#/) of a slideshow introducing _pandas_ for working with data.
The slides are created in notebooks like normal, but you'll need to designate which cells are slides and the type of slide the cell will be. In the menu bar, click View > Cell Toolbar > Slideshow to bring up the slide cell menu on each cell.
This will show a menu dropdown on each cell that lets you choose how the cell shows up in the slideshow.
**Slides** are full slides that you move through left to right. **Sub-slides** show up in the slideshow by pressing up or down. **Fragments** are hidden at first, then appear with a button press. You can skip cells in the slideshow with **Skip** and **Notes** leaves the cell as speaker notes.
### Running the slideshow
Create the slideshow from the notebook file
`jupyter nbconvert notebook.ipynb --to slides`
This just converts the notebook to the necessary files for the slideshow, but you need to serve it with an HTTP server to actually see the presentation.
To convert it and immediately see it, use
`jupyter nbconvert notebook.ipynb --to slides --post serve`
# Numpy Learning
These NumPy learning notes are based mostly on [Numerical Python, Johansson](https://learning.oreilly.com/library/view/numerical-python/9781484242469/html/332789_2_En_2_Chapter.xhtml) and the accompanying [github source](https://github.com/Apress/numerical-python-second-ed).
## CH.2 Vectors, matrices and multidimensional arrays
```
import numpy as np
data = np.array([[1, 2], [3, 4], [5, 6]])
type(data)
print(f'The dimension is : {data.ndim}');
print(f'The shape is : {data.shape}');
print(f'The size is : {data.size}');
print(f'The data type is : {data.dtype}');
print(f'The number of bytes is : {data.nbytes}');
```
### 2.1 Data Types
Array data types in NumPy include: <br>
* `int` <br>
* `float` <br>
* `complex` <br>
For `complex`, the real and imaginary parts can be accessed separately:
```
np.array([1, 2, 3], dtype=int)        # the aliases np.int/np.float/np.complex were removed from NumPy; the builtins work
data = np.array([1, 2, 3], dtype=float)
data
data = np.array([1, 2, 3], dtype=complex)
data
print(f'The real numbers in the data:{data.real}');
print(f'The imaginary numbers in the data:{data.imag}');
```
## 2.2 Creating Arrays
```
data = np.array([11, 12, 13, 14]);
print(f'The matrix is : {data}');
print(f'The dimension is : {data.ndim}');
print(f'The shape is : {data.shape}');
data = np.array([[11, 12], [13, 14]]);
print(f'The matrix is : {data}');
print(f'The dimension is : {data.ndim}');
print(f'The shape is : {data.shape}');
```
### 2.2.1 Arrays filled with constant values
```
np.zeros((4,3))
data = np.ones(7)
print(f'The matrix/vector is : {data}');
print(f'The data type is : {data.dtype}');
x1 = 7.7 * np.ones(8)
print(f'The matrix/vector is : {x1}');
x2 = np.full(8, 7.7)
print(f'The matrix/vector is : {x2}');
x3 = np.empty(8)
x3.fill(7.7)
print(f'The matrix/vector is : {x3}');
```
### 2.2.2 Arrays filled with incremental sequences
```
x1 = np.arange(0.0, 10, 1) # 0 -> start, 10 -> stop (exclusive), 1 -> step
print(f'The matrix/vector is : {x1}');
x2 = np.linspace(0,99, 100); # 0 -> start, 99 --> end (inclusive); 100 --> total num
x2
```
### 2.2.3 Arrays filled with logarithmic sequences
```
x1 = np.logspace(0,4,5) # start exponent 0, stop exponent 4, 5 values: 10**0 ... 10**4
x1
```
### 2.2.4 Mesh-grid arrays
```
x = np.array([-1, 0, 1])
y = np.array([-2, 0, 2])
X, Y = np.meshgrid(x, y)
X
Y
Z = (X + Y) ** 2
Z
```
### 2.2.5 Creating arrays with properties of other arrays
```
def f(x):
y = np.ones_like(x)
# compute with x and y
return y
f(10)
```
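`np.ones_like` has siblings that copy the shape and dtype of an existing array while filling in different values; a quick illustration, using the `np` alias imported above:
```
x = np.array([[1, 2, 3], [4, 5, 6]])
np.zeros_like(x)        # same shape and dtype as x, filled with 0
np.full_like(x, 7)      # same shape and dtype as x, filled with 7
```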
### 2.2.6 Creating matrix arrays
> `np.identity` to create identity matrix <br>
> `np.eye` to create eye matrix <br>
> `np.diag` to create diagonal matrix
```
np.identity(6)
np.eye(4, k=2)
np.eye(4, k=-2)
```
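The list above also mentions `np.diag`, which the cell does not exercise; a short example:
```
np.diag([1, 2, 3])                 # build a diagonal matrix from a 1-D array
A = np.arange(9).reshape(3, 3)
np.diag(A)                         # extract the main diagonal of an existing matrix
np.diag(A, k=1)                    # the first super-diagonal
```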
# <font color=darkblue>ENGR 1330-2022-1 Exam1-Laboratory Portion </font>
**Alexander, Keegan**
**R11787034**
ENGR 1330 Exam 1 - Laboratory/Programming Skills
---
**Download** (right-click, save target as ...) this page as a jupyterlab notebook from: [s22-ex1-deploy.ipynb](http://54.243.252.9/engr-1330-webroot/5-ExamProblems/Exam1/Exam1/spring2022/s22-ex1-deploy.ipynb)
**If you are unable to download the file, create an empty notebook and copy paste the problems into Markdown cells and Code cells (problem-by-problem)**
---
## Problem 1 (10 pts) : <font color = 'magenta'>*Profile your computer*</font>
Execute the code cell below exactly as written. If you get an error just continue to the remaining problems.
```
# Preamble script block to identify host, user, and kernel
import sys
! hostname
! whoami
print(sys.executable)
print(sys.version)
print(sys.version_info)
```
---
## Problem 2 (10 pts): <font color = 'magenta'>*input(),typecast, string reversal, comparison based selection, print()*</font>
Build a script where the user supplies a number; then determine whether it is a palindrome number. A palindrome number is a number that is the same after reversal. For example, 545 is a palindrome number.
- Case 1: 545
- Case 2: 123
- Case 3: 666
```
# Case 1
numberstring = input("give a number for n:")
numdigits = len(numberstring)
middlepoint = numdigits // 2
print("middle point is:", middlepoint)
firsthalf = numberstring[0:middlepoint]
secondhalf = numberstring[middlepoint + numdigits % 2:]   # skip the middle digit when the length is odd
reversefirsthalf = firsthalf[::-1]
print("first half is:", firsthalf)
print("reverse first half is:", reversefirsthalf)
print("second half is:", secondhalf)
if reversefirsthalf == secondhalf:
    print(numberstring, 'is a palindrome number.')
else:
    print(numberstring, 'is not a palindrome number.')
# Case 2
numberstring = input("give a number for n:")
numdigits = len(numberstring)
middlepoint = numdigits // 2
print("middle point is:", middlepoint)
firsthalf = numberstring[0:middlepoint]
secondhalf = numberstring[middlepoint + numdigits % 2:]
reversefirsthalf = firsthalf[::-1]
print("first half is:", firsthalf)
print("reverse first half is:", reversefirsthalf)
print("second half is:", secondhalf)
if reversefirsthalf == secondhalf:
    print(numberstring, 'is a palindrome number.')
else:
    print(numberstring, 'is not a palindrome number.')
# Case 3
numberstring = input("give a number for n:")
numdigits = len(numberstring)
middlepoint = numdigits // 2
print("middle point is:", middlepoint)
firsthalf = numberstring[0:middlepoint]
secondhalf = numberstring[middlepoint + numdigits % 2:]
reversefirsthalf = firsthalf[::-1]
print("first half is:", firsthalf)
print("reverse first half is:", reversefirsthalf)
print("second half is:", secondhalf)
if reversefirsthalf == secondhalf:
    print(numberstring, 'is a palindrome number.')
else:
    print(numberstring, 'is not a palindrome number.')
```
---
## Problem 3 (15 pts): <font color = 'magenta'>*len(),compare,accumulator, populate an empty list,for loop, print()*</font>
Two lists are defined as
```
x= [1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8]
f_of_x = [1.543,1.668,1.811,1.971,2.151,2.352,2.577,2.828,3.107]
```
Create a script that determines the length of each list and if they are the same length then print the contents of each list row-wise, and the running sum of `f_of_x` so the output looks like
```
--x-- --f_of_x-- --sum--
1.0 1.543 1.543
1.1 1.668 3.211
... ... ...
... ... ...
1.7 2.828 16.901
1.8 3.107 20.008
```
Test your script using the two lists above, then with the two lists below:
```
x= [1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8]
f_of_x =[1.543, 3.211, 5.022, 6.993, 9.144, 11.496, 14.073, 16.901, 20.008]
```
```
# Case 1
# define variables
x = [1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8]
f_of_x = [1.543,1.668,1.811,1.971,2.151,2.352,2.577,2.828,3.107]
length = len(x)
_length = len(f_of_x)               # validate lengths
print('length of x is ', length)
print('length of f_of_x is ', _length)
if length == _length:
    running_sum = 0                 # accumulator for the running sum of f_of_x
    print("--x-- --f_of_x-- --sum--")            # print (header line)
    for i in range(length):
        running_sum = running_sum + f_of_x[i]
        print(x[i], f_of_x[i], round(running_sum, 3))   # report result row-wise
else:
    print('the lists are not the same length')

# Case 2
x = [1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8]
f_of_x = [1.543, 3.211, 5.022, 6.993, 9.144, 11.496, 14.073, 16.901, 20.008]
length = len(x)
_length = len(f_of_x)               # validate lengths
print('length of x is ', length)
print('length of f_of_x is ', _length)
if length == _length:
    running_sum = 0
    print("--x-- --f_of_x-- --sum--")
    for i in range(length):
        running_sum = running_sum + f_of_x[i]
        print(x[i], f_of_x[i], round(running_sum, 3))
else:
    print('the lists are not the same length')
```
---
## Problem 4 Function (15 points) : <font color = 'magenta'> *def ..., input(),typecast,arithmetic based selection, print()* </font>
Build a function that takes as input two integer numbers. The function should return their product if the product is greater than 666, otherwise the function should return their sum.
Employ the function in an interactive script and test the following cases:
- Case 1: 65 and 10
- Case 2: 66 and 11
- Case 3: 25 and 5
```
# define variables
# interactive input
# computation/compare
# report result
# Case 1
# define variables
x = int(input(('give a number: ')))
_x = int(input(('give another number: ')))
product = x*_x
sum1 = x+_x
# interactive input
# computation/compare
if (product > 666):
print(product)
else:
print(sum1)
# report result
# Case 2
# define variables
x = int(input(('give a number: ')))
_x = int(input(('give another number: ')))
product = x*_x
sum1 = x+_x
# interactive input
# computation/compare
if (product > 666):
print(product)
else:
print(sum1)
# report result
# Case 3
# define variables
x = int(input(('give a number: ')))
_x = int(input(('give another number: ')))
product = x*_x
sum1 = x+_x
# interactive input
# computation/compare
if (product > 666):
print(product)
else:
print(sum1)
# report result
```
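The prompt asks for a function, while the cells above repeat the logic inline for each case. A minimal sketch of the function-based version (the function name is my own):
```
def product_or_sum(a, b):
    """Return a*b if the product is greater than 666, otherwise return a+b."""
    product = a * b
    if product > 666:
        return product
    return a + b

# Case 1: 65, 10 -> 75   Case 2: 66, 11 -> 726   Case 3: 25, 5 -> 30
for a, b in [(65, 10), (66, 11), (25, 5)]:
    print(a, b, '->', product_or_sum(a, b))
```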
# Regression
## Install packages
```
import sys
!{sys.executable} -m pip install -r requirements.txt
import pandas as pd
import numpy as np
import os
import helper
import quiz_tests
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (14, 8)
```
## Simulate two stock prices
```
# just set the seed for the random number generator
np.random.seed(100)
# use returns to create a price series
drift = 100
r0 = pd.Series(np.random.normal(0, 1, 1000))
s0 = pd.Series(np.cumsum(r0), name='s0') + drift
noise1 = np.random.normal(0, 0.4, 1000)
drift1 = 50
r1 = r0 + noise1
s1 = pd.Series(np.cumsum(r1), name='s1') + drift1
noise2 = np.random.normal(0, 0.4, 1000)
drift2 = 60
r2 = r0 + noise2
s2 = pd.Series(np.cumsum(r2), name='s2') + drift2
pd.concat([s1, s2], axis=1).plot(figsize=(15,6))
plt.show()
## Plot data with scatterplot
sc = plt.scatter(s2, s1, s=30, edgecolor='b', alpha=0.7)
plt.xlabel('s2')
plt.ylabel('s1');
from sklearn.linear_model import LinearRegression
```
## Quiz: Linear Regression
Note that the LinearRegression().fit() expects 2D numpy arrays. Since s1 and s2 are pandas series, we can use Series.values to get the values as a numpy array. Since these are 1D arrays, we can use numpy.reshape(-1,1) to make these 1000 row by 1 column 2 dimensional arrays.
The coefficients of the linear regression, $\beta$ and $intercept$ for the regression line:
$y = \beta \times x + intercept$
Can be obtained after fitting to the data. Use `LinearRegression.coef_` for the slope (beta coefficients) and `LinearRegression.intercept_` for the intercept. You may want to practice accessing these outside of the function definition, to see if you'll need additional brackets `[]` to access the values.
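For instance, a quick toy fit is an easy way to practice accessing these attributes before writing the function (this reuses the imports from the cells above):
```
lr = LinearRegression()
x_toy = np.arange(10).reshape(-1, 1)    # 10 rows, 1 column
y_toy = 3 * x_toy + 2
lr.fit(x_toy, y_toy)
print(lr.coef_)         # shape (1, 1) -> the slope is lr.coef_[0][0]
print(lr.intercept_)    # shape (1,)   -> the intercept is lr.intercept_[0]
```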
```
def regression_slope_and_intercept(xSeries, ySeries):
"""
xSeries: pandas series, x variable
ySeries: pandas series, y variable
"""
lr = LinearRegression()
#TODO: get the values from each series, reshape to be 2 dimensional
#set s1 to the x variable, s2 to the y variable
xVar = xSeries.values.reshape(-1,1)
yVar = ySeries.values.reshape(-1,1)
#TODO: call LinearRegression.fit(). Pass in the x variable then y variable
lr.fit(xVar, yVar)
#TODO: obtain the slope and intercept
slope = lr.coef_[0][0]
intercept = lr.intercept_[0]
return (slope, intercept)
quiz_tests.test_regression_slope_and_intercept(regression_slope_and_intercept);
slope, intercept = regression_slope_and_intercept(s1,s2);
print(f"slope {slope:.2f} and intercept {intercept:.2f}")
```
### Plot the fitted regression line over the scatterplot
```
plt.scatter(s2, s1, s=30, edgecolor='b', alpha=0.5);
x = np.linspace(s1.min()-5, s1.max()+5, 2)
yPred = slope * x + intercept
plt.plot(yPred,x, alpha=0.2, lw=3, color='r')
plt.xlabel('s2')
plt.ylabel('s1');
```
If you're stuck, you can also check out the solution [here](regression_solution.ipynb)
## Introduction
This kernel shows how to use NBSVM (Naive Bayes - Support Vector Machine) to create a strong baseline for the [Toxic Comment Classification Challenge](https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge) competition. NBSVM was introduced by Sida Wang and Chris Manning in the paper [Baselines and Bigrams: Simple, Good Sentiment and Topic Classification](https://nlp.stanford.edu/pubs/sidaw12_simple_sentiment.pdf). In this kernel, we use sklearn's logistic regression, rather than SVM, although in practice the two are nearly identical (sklearn uses the liblinear library behind the scenes).
If you're not familiar with naive bayes and bag of words matrices, I've made a preview available of one of fast.ai's upcoming *Practical Machine Learning* course videos, which introduces this topic. Here is a link to the section of the video which discusses this: [Naive Bayes video](https://youtu.be/37sFIak42Sc?t=3745).
```
import pandas as pd, numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
train = pd.read_csv('/Users/szkfzx/datasets/jigsaw-toxic-comment-classification-challenge/train.csv')
test = pd.read_csv('/Users/szkfzx/datasets/jigsaw-toxic-comment-classification-challenge/test.csv')
subm = pd.read_csv('/Users/szkfzx/datasets/jigsaw-toxic-comment-classification-challenge/sample_submission.csv')
```
## Looking at the data
The training data contains a row per comment, with an id, the text of the comment, and 6 different labels that we'll try to predict.
```
train.head()
```
Here's a couple of examples of comments, one toxic, and one with no labels.
```
train['comment_text'][0]
train['comment_text'][2]
```
The length of the comments varies a lot.
```
lens = train.comment_text.str.len()
lens.mean(), lens.std(), lens.max()
lens.hist()
```
We'll create a list of all the labels to predict, and we'll also create a 'none' label so we can see how many comments have no labels. We can then summarize the dataset.
```
label_cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
train['none'] = 1-train[label_cols].max(axis=1)
train.describe()
len(train),len(test)
```
There are a few empty comments that we need to get rid of, otherwise sklearn will complain.
```
COMMENT = 'comment_text'
train[COMMENT].fillna("unknown", inplace=True)
test[COMMENT].fillna("unknown", inplace=True)
subm.head()
```
## Building the model
We'll start by creating a *bag of words* representation, as a *term document matrix*. We'll use ngrams, as suggested in the NBSVM paper.
```
import re, string
re_tok = re.compile(f'([{string.punctuation}“”¨«»®´·º½¾¿¡§£₤‘’])')
def tokenize(s): return re_tok.sub(r' \1 ', s).split()
```
It turns out that using TF-IDF gives even better priors than the binarized features used in the paper. I don't think this has been mentioned in any paper before, but it improves leaderboard score from 0.59 to 0.55.
```
n = train.shape[0]
vec = TfidfVectorizer(ngram_range=(1,2), tokenizer=tokenize,
min_df=3, max_df=0.9, strip_accents='unicode', use_idf=1,
smooth_idf=1, sublinear_tf=1 )
trn_term_doc = vec.fit_transform(train[COMMENT])
test_term_doc = vec.transform(test[COMMENT])
```
This creates a *sparse matrix* with only a small number of non-zero elements (*stored elements* in the representation below).
```
trn_term_doc, test_term_doc
```
Here's the basic naive bayes feature equation:
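Written out, the smoothed log-count ratio computed below is $r = \log\frac{p(f \mid y=1)}{p(f \mid y=0)}$, where $p(f \mid y) = \frac{1 + \sum_{i:\,y_i=y} x_i}{1 + |\{i : y_i = y\}|}$ — the `+1` terms are the Laplace smoothing implemented in `pr`.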
```
def pr(y_i, y):
p = x[y==y_i].sum(0)
return (p+1) / ((y==y_i).sum()+1)
x = trn_term_doc
test_x = test_term_doc
```
Fit a model for one dependent at a time:
```
def get_mdl(y):
y = y.values
r = np.log(pr(1,y) / pr(0,y))
    m = LogisticRegression(C=4, dual=True, solver='liblinear')  # the dual formulation requires the liblinear solver
x_nb = x.multiply(r)
return m.fit(x_nb, y), r
preds = np.zeros((len(test), len(label_cols)))
for i, j in enumerate(label_cols):
print('fit', j)
m,r = get_mdl(train[j])
preds[:,i] = m.predict_proba(test_x.multiply(r))[:,1]
```
And finally, create the submission file.
```
submid = pd.DataFrame({'id': subm["id"]})
submission = pd.concat([submid, pd.DataFrame(preds, columns = label_cols)], axis=1)
submission.to_csv('submission.csv', index=False)
submission.head()
```
# Introduction to Machine Learning and Toolkit Exercises
## Introduction
We will be using the iris data set for this tutorial. This is a well-known data set containing iris species and sepal and petal measurements. The data we will use are in a file called `Iris_Data.csv` found in the [data](../../data) directory.
```
from __future__ import print_function
import os
data_path = ['data']
print (data_path)
```
## Question 1
Load the data from the file using the techniques learned today. Examine it.
Determine the following:
* The number of data points (rows). (*Hint:* check out the dataframe `.shape` attribute.)
* The column names. (*Hint:* check out the dataframe `.columns` attribute.)
* The data types for each column. (*Hint:* check out the dataframe `.dtypes` attribute.)
```
import numpy as np
import pandas as pd
filepath = os.sep.join(data_path + ['Iris_Data.csv'])
print(filepath)
data = pd.read_csv(filepath)
data.head()
# Number of rows
print(data.shape[0])
# Column names
print(data.columns.tolist())
# Data types
print(data.dtypes)
```
## Question 2
Examine the species names and note that they all begin with 'Iris-'. Remove this portion of the name so the species name is shorter.
*Hint:* there are multiple ways to do this, but you could use either the [string processing methods](http://pandas.pydata.org/pandas-docs/stable/text.html) or the [apply method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.apply.html).
```
# The str method maps the following function to each entry as a string
data['species'] = data.species.str.replace('Iris-', '')
# alternatively
# data['species'] = data.species.apply(lambda r: r.replace('Iris-', ''))
data.head()
```
## Question 3
Determine the following:
* The number of each species present. (*Hint:* check out the series `.value_counts` method.)
* The mean, median, and quantiles and ranges (max-min) for each petal and sepal measurement.
*Hint:* for the last question, the `.describe` method does have median, but it's not called median. It's the *50%* quantile. `.describe` does not have range though, and in order to get the range, you will need to create a new entry in the `.describe` table, which is `max - min`.
```
#Student writes code here
data.species.value_counts()
data.describe()
```
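The range asked for in the hint is not produced by the cell above; one way to append it, reusing the `data` frame loaded earlier:
```
stats_df = data.describe()
stats_df.loc['range'] = stats_df.loc['max'] - stats_df.loc['min']   # add a row for max - min
out_fields = ['mean', '25%', '50%', '75%', 'range']
stats_df = stats_df.loc[out_fields]
stats_df.rename({'50%': 'median'}, inplace=True)    # the 50% quantile is the median
stats_df
```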
## Question 4
Calculate the following **for each species** in a separate dataframe:
* The mean of each measurement (sepal_length, sepal_width, petal_length, and petal_width).
* The median of each of these measurements.
*Hint:* you may want to use Pandas [`groupby` method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.groupby.html) to group by species before calculating the statistic.
If you finish both of these, try calculating both statistics (mean and median) in a single table (i.e. with a single groupby call). See the section of the Pandas documentation on [applying multiple functions at once](http://pandas.pydata.org/pandas-docs/stable/groupby.html#applying-multiple-functions-at-once) for a hint.
```
# The mean calculation
data.groupby('species').mean()
# The median calculation
data.groupby('species').median()
# applying multiple functions at once - 2 methods
data.groupby('species').agg(['mean', 'median']) # passing a list of recognized strings
data.groupby('species').agg([np.mean, np.median]) # passing a list of explicit aggregation functions
# If certain fields need to be aggregated differently, we can do:
from pprint import pprint
agg_dict = {field: ['mean', 'median'] for field in data.columns if field != 'species'}
agg_dict['petal_length'] = 'max'
pprint(agg_dict)
data.groupby('species').agg(agg_dict)
```
## Question 5
Make a scatter plot of `sepal_length` vs `sepal_width` using Matplotlib. Label the axes and give the plot a title.
```
import matplotlib.pyplot as plt
%matplotlib inline
# A simple scatter plot with Matplotlib
ax = plt.axes()
ax.scatter(data.sepal_length, data.sepal_width)
# Label the axes
ax.set(xlabel='Sepal Length (cm)',
ylabel='Sepal Width (cm)',
title='Sepal Length vs Width');
```
## Question 6
Make a histogram of any one of the four features. Label axes and title it as appropriate.
```
#Student writes code here
plt.hist(data.petal_length, bins=5)
plt.xlabel('Petal Length (cm)')
plt.ylabel('Count')
plt.title('Distribution of Petal Length')
plt.show()
```
## Question 7
Now create a single plot with histograms for each feature (`petal_width`, `petal_length`, `sepal_width`, `sepal_length`) overlayed. If you have time, next try to create four individual histogram plots in a single figure, where each plot contains one feature.
For some hints on how to do this with Pandas plotting methods, check out the [visualization guide](http://pandas.pydata.org/pandas-docs/version/0.18.1/visualization.html) for Pandas.
```
import seaborn as sns
sns.set_context('notebook')
# This uses the `.plot.hist` method
ax = data.plot.hist(bins=25, alpha=0.5, stacked=False)
ax.set_xlabel('Size (cm)');
# To create four separate plots, use Pandas `.hist` method
axList = data.hist(bins=25)
# Add some x- and y- labels to first column and last row
for ax in axList.flatten():
if ax.is_last_row():
ax.set_xlabel('Size (cm)')
if ax.is_first_col():
ax.set_ylabel('Frequency')
```
## Question 8
Using Pandas, make a boxplot of each petal and sepal measurement. Here is the documentation for [Pandas boxplot method](http://pandas.pydata.org/pandas-docs/version/0.18.1/visualization.html#visualization-box).
```
#Student writes code here
data.plot.box()
```
## Question 9
Now make a single boxplot where the features are separated in the x-axis and species are colored with different hues.
*Hint:* you may want to check the documentation for [Seaborn boxplots](http://seaborn.pydata.org/generated/seaborn.boxplot.html).
Also note that Seaborn is very picky about data format--for this plot to work, the input dataframe will need to be manipulated so that each row contains a single data point (a species, a measurement type, and the measurement value). Check out Pandas [stack](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.stack.html) method as a starting place.
Here is an example of a data format that will work:
| | species | measurement | size |
| - | ------- | ------------ | ---- |
| 0 | setosa | sepal_length | 5.1 |
| 1 | setosa | sepal_width | 3.5 |
```
# First we have to reshape the data so there is
# only a single measurement in each column
plot_data = (data
.set_index('species')
.stack()
.to_frame()
.reset_index()
.rename(columns={0:'size', 'level_1':'measurement'})
)
plot_data.head()
# Now plot the dataframe from above using Seaborn
sns.set_style('white')
sns.set_context('notebook')
sns.set_palette('dark')
f = plt.figure(figsize=(6,4))
sns.boxplot(x='measurement', y='size',
hue='species', data=plot_data);
```
## Question 10
Make a [pairplot](http://seaborn.pydata.org/generated/seaborn.pairplot.html) with Seaborn to examine the correlation between each of the measurements.
*Hint:* this plot may look complicated, but it is actually only a single line of code. This is the power of Seaborn and dataframe-aware plotting! See the lecture notes for reference.
```
#Student writes code here
sns.pairplot(data)
```
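If you also want to see how these relationships differ between species, `pairplot` accepts a `hue` argument; a small optional variation of the answer above:
```
# Color the scatter points and diagonal plots by species
sns.pairplot(data, hue='species')
```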
# Ungraded Lab: Hyperparameter tuning and model training with TFX
In this lab, you will be again doing hyperparameter tuning but this time, it will be within a [Tensorflow Extended (TFX)](https://www.tensorflow.org/tfx/) pipeline.
We have already introduced some TFX components in Course 2 of this specialization related to data ingestion, validation, and transformation. In this notebook, you will get to work with two more which are related to model development and training: *Tuner* and *Trainer*.
<img src='https://www.tensorflow.org/tfx/guide/images/prog_trainer.png' alt='tfx pipeline'>
image source: https://www.tensorflow.org/tfx/guide
* The *Tuner* utilizes the [Keras Tuner](https://keras-team.github.io/keras-tuner/) API under the hood to tune your model's hyperparameters.
* You can get the best set of hyperparameters from the Tuner component and feed it into the *Trainer* component to optimize your model for training.
You will again be working with the [FashionMNIST](https://github.com/zalandoresearch/fashion-mnist) dataset and will feed it through the TFX pipeline up to the Trainer component. You will quickly review the earlier components from Course 2, then focus on the two new components introduced.
Let's begin!
## Setup
### Install TFX
You will first install [TFX](https://www.tensorflow.org/tfx), a framework for developing end-to-end machine learning pipelines.
```
!pip install -U pip
!pip install -U tfx==1.3
# These are downgraded to work with the packages used by TFX 1.3
# Please do not delete because it will cause import errors in the next cell
!pip install --upgrade tensorflow-estimator==2.6.0
!pip install --upgrade keras==2.6.0
```
*Note: In Google Colab, you need to restart the runtime at this point to finalize updating the packages you just installed. You can do so by clicking the `Restart Runtime` at the end of the output cell above (after installation), or by selecting `Runtime > Restart Runtime` in the Menu bar. **Please do not proceed to the next section without restarting.** You can also ignore the errors about version incompatibility of some of the bundled packages because we won't be using those in this notebook.*
### Imports
You will then import the packages you will need for this exercise.
```
import tensorflow as tf
from tensorflow import keras
import tensorflow_datasets as tfds
import os
import pprint
from tfx.components import ImportExampleGen
from tfx.components import ExampleValidator
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Transform
from tfx.components import Tuner
from tfx.components import Trainer
from tfx.proto import example_gen_pb2
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
```
## Download and prepare the dataset
As mentioned earlier, you will be using the Fashion MNIST dataset just like in the previous lab. This will allow you to compare the similarities and differences when using Keras Tuner as a standalone library and within an ML pipeline.
You will first need to setup the directories that you will use to store the dataset, as well as the pipeline artifacts and metadata store.
```
# Location of the pipeline metadata store
_pipeline_root = './pipeline/'
# Directory of the raw data files
_data_root = './data/fmnist'
# Temporary directory
tempdir = './tempdir'
# Create the dataset directory
!mkdir -p {_data_root}
# Create the TFX pipeline files directory
!mkdir {_pipeline_root}
```
You will now download FashionMNIST from [Tensorflow Datasets](https://www.tensorflow.org/datasets). The `with_info` flag will be set to `True` so you can display information about the dataset in the next cell (i.e. using `ds_info`).
```
# Download the dataset
ds, ds_info = tfds.load('fashion_mnist', data_dir=tempdir, with_info=True)
# Display info about the dataset
print(ds_info)
```
You can review the downloaded files with the code below. For this lab, you will be using the *train* TFRecord so you will need to take note of its filename. You will not use the *test* TFRecord in this lab.
```
# Define the location of the train tfrecord downloaded via TFDS
tfds_data_path = f'{tempdir}/{ds_info.name}/{ds_info.version}'
# Display contents of the TFDS data directory
print(os.listdir(tfds_data_path))
print(os.listdir(tempdir))
```
You will then copy the train split from the downloaded data so it can be consumed by the ExampleGen component in the next step. This component requires that your files are in a directory without extra files (e.g. JSONs and TXT files).
```
# Define the train tfrecord filename
train_filename = 'fashion_mnist-train.tfrecord-00000-of-00001'
# Copy the train tfrecord into the data root folder
!cp {tfds_data_path}/{train_filename} {_data_root}
```
## TFX Pipeline
With the setup complete, you can now proceed to creating the pipeline.
### Initialize the Interactive Context
You will start by initializing the [InteractiveContext](https://github.com/tensorflow/tfx/blob/master/tfx/orchestration/experimental/interactive/interactive_context.py) so you can run the components within this Colab environment. You can safely ignore the warning because you will just be using a local SQLite file for the metadata store.
```
# Initialize the InteractiveContext
context = InteractiveContext(pipeline_root=_pipeline_root)
```
### ExampleGen
You will start the pipeline by ingesting the TFRecord you set aside. The [ImportExampleGen](https://www.tensorflow.org/tfx/api_docs/python/tfx/components/ImportExampleGen) consumes TFRecords and you can specify splits as shown below. For this exercise, you will split the train tfrecord to use 80% for the train set, and the remaining 20% as eval/validation set.
```
# Specify 80/20 split for the train and eval set
output = example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(splits=[
example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=8),
example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=2),
]))
# Ingest the data through ExampleGen
example_gen = ImportExampleGen(input_base=_data_root, output_config=output)
# Run the component
context.run(example_gen)
# Print split names and URI
artifact = example_gen.outputs['examples'].get()[0]
print(artifact.split_names, artifact.uri)
```
### StatisticsGen
Next, you will compute the statistics of the dataset with the [StatisticsGen](https://www.tensorflow.org/tfx/guide/statsgen) component.
```
# Run StatisticsGen
statistics_gen = StatisticsGen(
examples=example_gen.outputs['examples'])
context.run(statistics_gen)
```
### SchemaGen
You can then infer the dataset schema with [SchemaGen](https://www.tensorflow.org/tfx/guide/schemagen). This will be used to validate incoming data to ensure that it is formatted correctly.
```
# Run SchemaGen
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)
context.run(schema_gen)
# Visualize the results
context.show(schema_gen.outputs['schema'])
```
### ExampleValidator
You can assume that the dataset is clean since we downloaded it from TFDS. But just to review, let's run it through [ExampleValidator](https://www.tensorflow.org/tfx/guide/exampleval) to detect if there are anomalies within the dataset.
```
# Run ExampleValidator
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
context.run(example_validator)
# Visualize the results. There should be no anomalies.
context.show(example_validator.outputs['anomalies'])
```
### Transform
Let's now use the [Transform](https://www.tensorflow.org/tfx/guide/transform) component to scale the image pixels and convert the data types to float. You will first define the transform module containing these operations before you run the component.
```
_transform_module_file = 'fmnist_transform.py'
%%writefile {_transform_module_file}
import tensorflow as tf
import tensorflow_transform as tft
# Keys
_LABEL_KEY = 'label'
_IMAGE_KEY = 'image'
def _transformed_name(key):
return key + '_xf'
def _image_parser(image_str):
'''converts the images to a float tensor'''
image = tf.image.decode_image(image_str, channels=1)
image = tf.reshape(image, (28, 28, 1))
image = tf.cast(image, tf.float32)
return image
def _label_parser(label_id):
'''converts the labels to a float tensor'''
label = tf.cast(label_id, tf.float32)
return label
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
# Convert the raw image and labels to a float array
with tf.device("/cpu:0"):
outputs = {
_transformed_name(_IMAGE_KEY):
tf.map_fn(
_image_parser,
tf.squeeze(inputs[_IMAGE_KEY], axis=1),
dtype=tf.float32),
_transformed_name(_LABEL_KEY):
tf.map_fn(
_label_parser,
inputs[_LABEL_KEY],
dtype=tf.float32)
}
# scale the pixels from 0 to 1
outputs[_transformed_name(_IMAGE_KEY)] = tft.scale_to_0_1(outputs[_transformed_name(_IMAGE_KEY)])
return outputs
```
You will run the component by passing in the examples, schema, and transform module file.
*Note: You can safely ignore the warnings and `udf_utils` related errors.*
```
# Ignore TF warning messages
tf.get_logger().setLevel('ERROR')
# Setup the Transform component
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=os.path.abspath(_transform_module_file))
# Run the component
context.run(transform)
```
### Tuner
As the name suggests, the [Tuner](https://www.tensorflow.org/tfx/guide/tuner) component tunes the hyperparameters of your model. To use this, you will need to provide a *tuner module file* which contains a `tuner_fn()` function. In this function, you will mostly do the same steps as you did in the previous ungraded lab but with some key differences in handling the dataset.
The Transform component earlier saved the transformed examples as TFRecords compressed in `.gz` format and you will need to load that into memory. Once loaded, you will need to create batches of features and labels so you can finally use it for hypertuning. This process is modularized in the `_input_fn()` below.
Going back, the `tuner_fn()` function will return a `TunerFnResult` [namedtuple](https://docs.python.org/3/library/collections.html#collections.namedtuple) containing your `tuner` object and a set of arguments to pass to the `tuner.search()` method. You will see these in action in the following cells. When reviewing the module file, we recommend reading `tuner_fn()` first before looking at the other auxiliary functions.
```
# Declare name of module file
_tuner_module_file = 'tuner.py'
%%writefile {_tuner_module_file}
# Define imports
from kerastuner.engine import base_tuner
import kerastuner as kt
from tensorflow import keras
from typing import NamedTuple, Dict, Text, Any, List
from tfx.components.trainer.fn_args_utils import FnArgs, DataAccessor
import tensorflow as tf
import tensorflow_transform as tft
# Declare namedtuple field names
TunerFnResult = NamedTuple('TunerFnResult', [('tuner', base_tuner.BaseTuner),
('fit_kwargs', Dict[Text, Any])])
# Label key
LABEL_KEY = 'label_xf'
# Callback for the search strategy
stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
def _gzip_reader_fn(filenames):
'''Load compressed dataset
Args:
filenames - filenames of TFRecords to load
Returns:
TFRecordDataset loaded from the filenames
'''
# Load the dataset. Specify the compression type since it is saved as `.gz`
return tf.data.TFRecordDataset(filenames, compression_type='GZIP')
def _input_fn(file_pattern,
tf_transform_output,
num_epochs=None,
batch_size=32) -> tf.data.Dataset:
'''Create batches of features and labels from TF Records
Args:
file_pattern - List of files or patterns of file paths containing Example records.
tf_transform_output - transform output graph
num_epochs - Integer specifying the number of times to read through the dataset.
If None, cycles through the dataset forever.
batch_size - An int representing the number of records to combine in a single batch.
Returns:
A dataset of dict elements, (or a tuple of dict elements and label).
Each dict maps feature keys to Tensor or SparseTensor objects.
'''
# Get feature specification based on transform output
transformed_feature_spec = (
tf_transform_output.transformed_feature_spec().copy())
# Create batches of features and labels
dataset = tf.data.experimental.make_batched_features_dataset(
file_pattern=file_pattern,
batch_size=batch_size,
features=transformed_feature_spec,
reader=_gzip_reader_fn,
num_epochs=num_epochs,
label_key=LABEL_KEY)
return dataset
def model_builder(hp):
'''
Builds the model and sets up the hyperparameters to tune.
Args:
hp - Keras tuner object
Returns:
model with hyperparameters to tune
'''
# Initialize the Sequential API and start stacking the layers
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(28, 28, 1)))
# Tune the number of units in the first Dense layer
# Choose an optimal value between 32-512
hp_units = hp.Int('units', min_value=32, max_value=512, step=32)
model.add(keras.layers.Dense(units=hp_units, activation='relu', name='dense_1'))
# Add next layers
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(10, activation='softmax'))
# Tune the learning rate for the optimizer
# Choose an optimal value from 0.01, 0.001, or 0.0001
hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])
model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=['accuracy'])
return model
def tuner_fn(fn_args: FnArgs) -> TunerFnResult:
"""Build the tuner using the KerasTuner API.
Args:
fn_args: Holds args as name/value pairs.
- working_dir: working dir for tuning.
- train_files: List of file paths containing training tf.Example data.
- eval_files: List of file paths containing eval tf.Example data.
- train_steps: number of train steps.
- eval_steps: number of eval steps.
- schema_path: optional schema of the input data.
- transform_graph_path: optional transform graph produced by TFT.
Returns:
A namedtuple contains the following:
- tuner: A BaseTuner that will be used for tuning.
- fit_kwargs: Args to pass to tuner's run_trial function for fitting the
model , e.g., the training and validation dataset. Required
args depend on the above tuner's implementation.
"""
# Define tuner search strategy
tuner = kt.Hyperband(model_builder,
objective='val_accuracy',
max_epochs=10,
factor=3,
directory=fn_args.working_dir,
project_name='kt_hyperband')
# Load transform output
tf_transform_output = tft.TFTransformOutput(fn_args.transform_graph_path)
# Use _input_fn() to extract input features and labels from the train and val set
train_set = _input_fn(fn_args.train_files[0], tf_transform_output)
val_set = _input_fn(fn_args.eval_files[0], tf_transform_output)
return TunerFnResult(
tuner=tuner,
fit_kwargs={
"callbacks":[stop_early],
'x': train_set,
'validation_data': val_set,
'steps_per_epoch': fn_args.train_steps,
'validation_steps': fn_args.eval_steps
}
)
```
With the module defined, you can now setup the Tuner component. You can see the description of each argument [here](https://www.tensorflow.org/tfx/api_docs/python/tfx/components/Tuner).
Notice that we passed a `num_steps` argument to the train and eval args and this was used in the `steps_per_epoch` and `validation_steps` arguments in the tuner module above. This can be useful if you don't want to go through the entire dataset when tuning. For example, if you have 10GB of training data, it would be incredibly time consuming if you will iterate through it entirely just for one epoch and one set of hyperparameters. You can set the number of steps so your program will only go through a fraction of the dataset.
You can compute the total number of steps in one epoch as `number of examples / batch size`. For this particular example, that is `48000 examples / 32 (default batch size)`, which equals `1500` steps per epoch for the train set (the eval steps can be computed the same way from its 12000 examples). Since you passed `500` in the `num_steps` of the train args, some examples will be skipped. This will likely result in lower accuracy readings but will save time in doing the hypertuning. Try modifying this value later and see if you arrive at the same set of hyperparameters.
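As a quick sanity check, the same arithmetic can be written out in a short snippet. The 48,000/12,000 figures follow from the 80/20 split of FashionMNIST's 60,000 training examples, and 32 is the default batch size of `_input_fn()`:
```
batch_size = 32          # default batch size in _input_fn()
train_examples = 48000   # 80% of the 60,000 FashionMNIST training examples
eval_examples = 12000    # remaining 20% used as the eval split

steps_per_epoch = train_examples // batch_size   # 1500 steps for a full pass over the train set
validation_steps = eval_examples // batch_size   # 375 steps for a full pass over the eval set

print(steps_per_epoch, validation_steps)  # 1500 375
```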
```
from tfx.proto import trainer_pb2
# Setup the Tuner component
tuner = Tuner(
module_file=_tuner_module_file,
examples=transform.outputs['transformed_examples'],
transform_graph=transform.outputs['transform_graph'],
schema=schema_gen.outputs['schema'],
train_args=trainer_pb2.TrainArgs(splits=['train'], num_steps=500),
eval_args=trainer_pb2.EvalArgs(splits=['eval'], num_steps=100)
)
# Run the component. This will take around 10 minutes to run.
# When done, it will summarize the results and show the 10 best trials.
context.run(tuner, enable_cache=False)
```
### Trainer
Like the Tuner component, the [Trainer](https://www.tensorflow.org/tfx/guide/trainer) component also requires a module file to setup the training process. It will look for a `run_fn()` function that defines and trains the model. The steps will look similar to the tuner module file:
* Define the model - You can get the results of the Tuner component through the `fn_args.hyperparameters` argument. You will see it passed into the `model_builder()` function below. If you didn't run `Tuner`, then you can just explicitly define the number of hidden units and learning rate.
* Load the train and validation sets - You have done this in the Tuner component. For this module, you will pass in a `num_epochs` value (10) to indicate how many batches will be prepared. You can opt not to do this and pass a `num_steps` value as before.
* Setup and train the model - This will look very familiar if you're already used to the [Keras Models Training API](https://keras.io/api/models/model_training_apis/). You can pass in callbacks like the [TensorBoard callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/TensorBoard) so you can visualize the results later.
* Save the model - This is needed so you can analyze and serve your model. You will get to do this in later parts of the course and specialization.
```
# Declare trainer module file
_trainer_module_file = 'trainer.py'
%%writefile {_trainer_module_file}
from tensorflow import keras
from typing import NamedTuple, Dict, Text, Any, List
from tfx.components.trainer.fn_args_utils import FnArgs, DataAccessor
import tensorflow as tf
import tensorflow_transform as tft
# Define the label key
LABEL_KEY = 'label_xf'
def _gzip_reader_fn(filenames):
'''Load compressed dataset
Args:
filenames - filenames of TFRecords to load
Returns:
TFRecordDataset loaded from the filenames
'''
# Load the dataset. Specify the compression type since it is saved as `.gz`
return tf.data.TFRecordDataset(filenames, compression_type='GZIP')
def _input_fn(file_pattern,
tf_transform_output,
num_epochs=None,
batch_size=32) -> tf.data.Dataset:
'''Create batches of features and labels from TF Records
Args:
file_pattern - List of files or patterns of file paths containing Example records.
tf_transform_output - transform output graph
num_epochs - Integer specifying the number of times to read through the dataset.
If None, cycles through the dataset forever.
batch_size - An int representing the number of records to combine in a single batch.
Returns:
A dataset of dict elements, (or a tuple of dict elements and label).
Each dict maps feature keys to Tensor or SparseTensor objects.
'''
transformed_feature_spec = (
tf_transform_output.transformed_feature_spec().copy())
dataset = tf.data.experimental.make_batched_features_dataset(
file_pattern=file_pattern,
batch_size=batch_size,
features=transformed_feature_spec,
reader=_gzip_reader_fn,
num_epochs=num_epochs,
label_key=LABEL_KEY)
return dataset
def model_builder(hp):
'''
Builds the model and sets up the hyperparameters to tune.
Args:
hp - Keras tuner object
Returns:
model with hyperparameters to tune
'''
# Initialize the Sequential API and start stacking the layers
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(28, 28, 1)))
# Get the number of units from the Tuner results
hp_units = hp.get('units')
model.add(keras.layers.Dense(units=hp_units, activation='relu'))
# Add next layers
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(10, activation='softmax'))
# Get the learning rate from the Tuner results
hp_learning_rate = hp.get('learning_rate')
# Setup model for training
model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=['accuracy'])
# Print the model summary
model.summary()
return model
def run_fn(fn_args: FnArgs) -> None:
"""Defines and trains the model.
Args:
fn_args: Holds args as name/value pairs. Refer here for the complete attributes:
https://www.tensorflow.org/tfx/api_docs/python/tfx/components/trainer/fn_args_utils/FnArgs#attributes
"""
# Callback for TensorBoard
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=fn_args.model_run_dir, update_freq='batch')
# Load transform output
tf_transform_output = tft.TFTransformOutput(fn_args.transform_graph_path)
# Create batches of data good for 10 epochs
train_set = _input_fn(fn_args.train_files[0], tf_transform_output, 10)
val_set = _input_fn(fn_args.eval_files[0], tf_transform_output, 10)
# Load best hyperparameters
hp = fn_args.hyperparameters.get('values')
# Build the model
model = model_builder(hp)
# Train the model
model.fit(
x=train_set,
validation_data=val_set,
callbacks=[tensorboard_callback]
)
# Save the model
model.save(fn_args.serving_model_dir, save_format='tf')
```
You can pass the output of the `Tuner` component to the `Trainer` by filling the `hyperparameters` argument with the `Tuner` output. This is indicated by the `tuner.outputs['best_hyperparameters']` below. You can see the definition of the other arguments [here](https://www.tensorflow.org/tfx/api_docs/python/tfx/components/Trainer).
```
# Setup the Trainer component
trainer = Trainer(
module_file=_trainer_module_file,
examples=transform.outputs['transformed_examples'],
hyperparameters=tuner.outputs['best_hyperparameters'],
transform_graph=transform.outputs['transform_graph'],
schema=schema_gen.outputs['schema'],
train_args=trainer_pb2.TrainArgs(splits=['train']),
eval_args=trainer_pb2.EvalArgs(splits=['eval']))
```
Take note that when re-training your model, you don't always have to retune your hyperparameters. Once you have a set that you think performs well, you can just import it with the ImporterNode as shown in the [official docs](https://www.tensorflow.org/tfx/guide/tuner):
```
hparams_importer = ImporterNode(
instance_name='import_hparams',
# This can be Tuner's output file or manually edited file. The file contains
# text format of hyperparameters (kerastuner.HyperParameters.get_config())
source_uri='path/to/best_hyperparameters.txt',
artifact_type=HyperParameters)
trainer = Trainer(
...
# An alternative is directly use the tuned hyperparameters in Trainer's user
# module code and set hyperparameters to None here.
hyperparameters = hparams_importer.outputs['result'])
```
```
# Run the component
context.run(trainer, enable_cache=False)
```
Your model should now be saved in your pipeline directory and you can navigate through it as shown below. The file is saved as `saved_model.pb`.
```
# Get artifact uri of trainer model output
model_artifact_dir = trainer.outputs['model'].get()[0].uri
# List subdirectories artifact uri
print(f'contents of model artifact directory:{os.listdir(model_artifact_dir)}')
# Define the model directory
model_dir = os.path.join(model_artifact_dir, 'Format-Serving')
# List contents of model directory
print(f'contents of model directory: {os.listdir(model_dir)}')
```
You can also visualize the training results by loading the logs saved by the Tensorboard callback.
```
model_run_artifact_dir = trainer.outputs['model_run'].get()[0].uri
%load_ext tensorboard
%tensorboard --logdir {model_run_artifact_dir}
```
***Congratulations! You have now created an ML pipeline that includes hyperparameter tuning and model training. You will know more about the next components in future lessons but in the next section, you will first learn about a framework for automatically building ML pipelines: AutoML. Enjoy the rest of the course!***
# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)
**Based on the official GROMACS tutorial:** [http://www.mdtutorials.com/gmx/lysozyme/index.html](http://www.mdtutorials.com/gmx/lysozyme/index.html)
***
This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).
***
## Settings
### Biobb modules used
- [biobb_io](https://github.com/bioexcel/biobb_io): Tools to fetch biomolecular data from public databases.
- [biobb_model](https://github.com/bioexcel/biobb_model): Tools to model macromolecular structures.
- [biobb_md](https://github.com/bioexcel/biobb_md): Tools to setup and run Molecular Dynamics simulations.
- [biobb_analysis](https://github.com/bioexcel/biobb_analysis): Tools to analyse Molecular Dynamics trajectories.
### Auxiliary libraries used
- [nb_conda_kernels](https://github.com/Anaconda-Platform/nb_conda_kernels): Enables a Jupyter Notebook or JupyterLab application in one conda environment to access kernels for Python, R, and other languages found in other environments.
- [nglview](http://nglviewer.org/#nglview): Jupyter/IPython widget to interactively view molecular structures and trajectories in notebooks.
- [ipywidgets](https://github.com/jupyter-widgets/ipywidgets): Interactive HTML widgets for Jupyter notebooks and the IPython kernel.
- [plotly](https://plot.ly/python/offline/): Python interactive graphing library integrated in Jupyter notebooks.
- [simpletraj](https://github.com/arose/simpletraj): Lightweight coordinate-only trajectory reader based on code from GROMACS, MDAnalysis and VMD.
### Conda Installation and Launch
```console
git clone https://github.com/bioexcel/biobb_wf_md_setup.git
cd biobb_wf_md_setup
conda env create -f conda_env/environment.yml
conda activate biobb_MDsetup_tutorial
jupyter-nbextension enable --py --user widgetsnbextension
jupyter-nbextension enable --py --user nglview
jupyter-notebook biobb_wf_md_setup/notebooks/biobb_MDsetup_tutorial.ipynb
```
***
## Pipeline steps
1. [Input Parameters](#input)
2. [Fetching PDB Structure](#fetch)
3. [Fix Protein Structure](#fix)
4. [Create Protein System Topology](#top)
5. [Create Solvent Box](#box)
6. [Fill the Box with Water Molecules](#water)
7. [Adding Ions](#ions)
8. [Energetically Minimize the System](#min)
9. [Equilibrate the System (NVT)](#nvt)
10. [Equilibrate the System (NPT)](#npt)
11. [Free Molecular Dynamics Simulation](#free)
12. [Post-processing and Visualizing Resulting 3D Trajectory](#post)
13. [Output Files](#output)
14. [Questions & Comments](#questions)
***
<img src="https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png" alt="Bioexcel2 logo"
title="Bioexcel2 logo" width="400" />
***
<a id="input"></a>
## Input parameters
**Input parameters** needed:
- **pdbCode**: PDB code of the protein structure (e.g. 1AKI)
```
import nglview
import ipywidgets
pdbCode = "1AKI"
```
<a id="fetch"></a>
***
## Fetching PDB structure
Downloading **PDB structure** with the **protein molecule** from the RCSB PDB database.<br>
Alternatively, a **PDB file** can be used as starting structure. <br>
***
**Building Blocks** used:
- [Pdb](https://biobb-io.readthedocs.io/en/latest/api.html#module-api.pdb) from **biobb_io.api.pdb**
***
```
# Downloading desired PDB file
# Import module
from biobb_io.api.pdb import Pdb
# Create properties dict and inputs/outputs
downloaded_pdb = pdbCode+'.pdb'
prop = {
'pdb_code': pdbCode
}
#Create and launch bb
Pdb(output_pdb_path=downloaded_pdb,
properties=prop).launch()
```
<a id="vis3D"></a>
### Visualizing 3D structure
Visualizing the downloaded/given **PDB structure** using **NGL**:
```
# Show protein
view = nglview.show_structure_file(downloaded_pdb)
view.add_representation(repr_type='ball+stick', selection='all')
view._remote_call('setSize', target='Widget', args=['','600px'])
view
```
<img src='ngl1.png'></img>
<a id="fix"></a>
***
## Fix protein structure
**Checking** and **fixing** (if needed) the protein structure:<br>
- **Modeling** **missing side-chain atoms**, modifying incorrect **amide assignments**, choosing **alternative locations**.<br>
- **Checking** for missing **backbone atoms**, **heteroatoms**, **modified residues** and possible **atomic clashes**.
***
**Building Blocks** used:
- [FixSideChain](https://biobb-model.readthedocs.io/en/latest/model.html#module-model.fix_side_chain) from **biobb_model.model.fix_side_chain**
***
```
# Check & Fix PDB
# Import module
from biobb_model.model.fix_side_chain import FixSideChain
# Create prop dict and inputs/outputs
fixed_pdb = pdbCode + '_fixed.pdb'
# Create and launch bb
FixSideChain(input_pdb_path=downloaded_pdb,
output_pdb_path=fixed_pdb).launch()
```
### Visualizing 3D structure
Visualizing the fixed **PDB structure** using **NGL**. In this particular example, the checking step didn't find any issue to be solved, so there is no difference between the original structure and the fixed one.
```
# Show protein
view = nglview.show_structure_file(fixed_pdb)
view.add_representation(repr_type='ball+stick', selection='all')
view._remote_call('setSize', target='Widget', args=['','600px'])
view.camera='orthographic'
view
```
<img src='ngl2.png'></img>
<a id="top"></a>
***
## Create protein system topology
**Building GROMACS topology** corresponding to the protein structure.<br>
The force field used in this tutorial is [**amber99sb-ildn**](https://dx.doi.org/10.1002%2Fprot.22711): the AMBER **parm99** force field with **corrections to the backbone** (sb) and **side-chain torsion potentials** (ildn). The water model used in this tutorial is [**spc/e**](https://pubs.acs.org/doi/abs/10.1021/j100308a038).<br>
Adding **hydrogen atoms** if missing. Automatically identifying **disulfide bridges**. <br>
Generating two output files:
- **GROMACS structure** (gro file)
- **GROMACS topology** ZIP compressed file containing:
- *GROMACS topology top file* (top file)
- *GROMACS position restraint file/s* (itp file/s)
***
**Building Blocks** used:
- [Pdb2gmx](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.pdb2gmx) from **biobb_md.gromacs.pdb2gmx**
***
```
# Create system topology
# Import module
from biobb_md.gromacs.pdb2gmx import Pdb2gmx
# Create inputs/outputs
output_pdb2gmx_gro = pdbCode+'_pdb2gmx.gro'
output_pdb2gmx_top_zip = pdbCode+'_pdb2gmx_top.zip'
# Create and launch bb
Pdb2gmx(input_pdb_path=fixed_pdb,
output_gro_path=output_pdb2gmx_gro,
output_top_zip_path=output_pdb2gmx_top_zip).launch()
```
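The cell above relies on the building block defaults, which already correspond to the force field and water model described earlier. If you prefer to state them explicitly (or swap them for others), they can be passed through the properties dictionary; a sketch, assuming the `force_field` and `water_type` property names documented for the biobb_md Pdb2gmx block:
```
# Make the force field and water model explicit
# (property names assumed from the biobb_md Pdb2gmx documentation)
prop = {
    'force_field': 'amber99sb-ildn',
    'water_type': 'spce'
}

# Create and launch bb with explicit properties
Pdb2gmx(input_pdb_path=fixed_pdb,
        output_gro_path=output_pdb2gmx_gro,
        output_top_zip_path=output_pdb2gmx_top_zip,
        properties=prop).launch()
```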
### Visualizing 3D structure
Visualizing the generated **GRO structure** using **NGL**. Note that **hydrogen atoms** were added to the structure by the **pdb2gmx GROMACS tool** when generating the **topology**.
```
# Show protein
view = nglview.show_structure_file(output_pdb2gmx_gro)
view.add_representation(repr_type='ball+stick', selection='all')
view._remote_call('setSize', target='Widget', args=['','600px'])
view.camera='orthographic'
view
```
<img src='ngl3.png'></img>
<a id="box"></a>
***
## Create solvent box
Define the unit cell for the **protein structure MD system** to fill it with water molecules.<br>
A **cubic box** is used to define the unit cell, with a **distance from the protein to the box edge of 1.0 nm**. The protein is **centered in the box**.
***
**Building Blocks** used:
- [Editconf](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.editconf) from **biobb_md.gromacs.editconf**
***
```
# Editconf: Create solvent box
# Import module
from biobb_md.gromacs.editconf import Editconf
# Create prop dict and inputs/outputs
output_editconf_gro = pdbCode+'_editconf.gro'
prop = {
'box_type': 'cubic',
'distance_to_molecule': 1.0
}
#Create and launch bb
Editconf(input_gro_path=output_pdb2gmx_gro,
output_gro_path=output_editconf_gro,
properties=prop).launch()
```
<a id="water"></a>
***
## Fill the box with water molecules
Fill the unit cell for the **protein structure system** with water molecules.<br>
The solvent type used is the default **Simple Point Charge water (SPC)**, a generic equilibrated 3-point solvent model.
***
**Building Blocks** used:
- [Solvate](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.solvate) from **biobb_md.gromacs.solvate**
***
```
# Solvate: Fill the box with water molecules
from biobb_md.gromacs.solvate import Solvate
# Create prop dict and inputs/outputs
output_solvate_gro = pdbCode+'_solvate.gro'
output_solvate_top_zip = pdbCode+'_solvate_top.zip'
# Create and launch bb
Solvate(input_solute_gro_path=output_editconf_gro,
output_gro_path=output_solvate_gro,
input_top_zip_path=output_pdb2gmx_top_zip,
output_top_zip_path=output_solvate_top_zip).launch()
```
### Visualizing 3D structure
Visualizing the **protein system** with the newly added **solvent box** using **NGL**.<br> Note the **cubic box** filled with **water molecules** surrounding the **protein structure**, which is **centered** right in the middle of the cube.
```
# Show protein
view = nglview.show_structure_file(output_solvate_gro)
view.clear_representations()
view.add_representation(repr_type='cartoon', selection='solute', color='green')
view.add_representation(repr_type='ball+stick', selection='SOL')
view._remote_call('setSize', target='Widget', args=['','600px'])
view.camera='orthographic'
view
```
<img src='ngl4.png'></img>
<a id="ions"></a>
***
## Adding ions
Add ions to neutralize the **protein structure** charge.
- [Step 1](#ionsStep1): Creating portable binary run file for ion generation
- [Step 2](#ionsStep2): Adding ions to **neutralize** the system
***
**Building Blocks** used:
- [Grompp](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.grompp) from **biobb_md.gromacs.grompp**
- [Genion](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.genion) from **biobb_md.gromacs.genion**
***
<a id="ionsStep1"></a>
### Step 1: Creating portable binary run file for ion generation
A simple **energy minimization** set of molecular dynamics parameters (mdp) will be used to generate the portable binary run file for **ion generation**, although **any legitimate combination of parameters** could be used in this step.
```
# Grompp: Creating portable binary run file for ion generation
from biobb_md.gromacs.grompp import Grompp
# Create prop dict and inputs/outputs
output_gppion_tpr = pdbCode+'_gppion.tpr'
prop = {
'mdp':{
'type': 'minimization'
}
}
# Create and launch bb
Grompp(input_gro_path=output_solvate_gro,
input_top_zip_path=output_solvate_top_zip,
output_tpr_path=output_gppion_tpr,
properties=prop).launch()
```
<a id="ionsStep2"></a>
### Step 2: Adding ions to neutralize the system
Replace **solvent molecules** with **ions** to **neutralize** the system.
```
# Genion: Adding ions to neutralize the system
from biobb_md.gromacs.genion import Genion
# Create prop dict and inputs/outputs
output_genion_gro = pdbCode+'_genion.gro'
output_genion_top_zip = pdbCode+'_genion_top.zip'
prop={
'neutral':True
}
# Create and launch bb
Genion(input_tpr_path=output_gppion_tpr,
output_gro_path=output_genion_gro,
input_top_zip_path=output_solvate_top_zip,
output_top_zip_path=output_genion_top_zip,
properties=prop).launch()
```
### Visualizing 3D structure
Visualizing the **neutralized protein system** with the newly added **ions** using **NGL**
```
# Show protein
view = nglview.show_structure_file(output_genion_gro)
view.clear_representations()
view.add_representation(repr_type='cartoon', selection='solute', color='sstruc')
view.add_representation(repr_type='ball+stick', selection='NA')
view.add_representation(repr_type='ball+stick', selection='CL')
view._remote_call('setSize', target='Widget', args=['','600px'])
view.camera='orthographic'
view
```
<img src='ngl5.png'></img>
<a id="min"></a>
***
## Energetically minimize the system
Energetically minimize the **protein system** until it reaches a desired potential energy.
- [Step 1](#emStep1): Creating portable binary run file for energy minimization
- [Step 2](#emStep2): Energetically minimize the **system** until the maximum force falls below 500 kJ mol-1 nm-1.
- [Step 3](#emStep3): Checking **energy minimization** results. Plotting energy by time during the **minimization** process.
***
**Building Blocks** used:
- [Grompp](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.grompp) from **biobb_md.gromacs.grompp**
- [Mdrun](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.mdrun) from **biobb_md.gromacs.mdrun**
- [GMXEnergy](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_energy) from **biobb_analysis.gromacs.gmx_energy**
***
<a id="emStep1"></a>
### Step 1: Creating portable binary run file for energy minimization
The **minimization** type of the **molecular dynamics parameters (mdp) property** contains the main default parameters to run an **energy minimization**:
- integrator = steep ; Algorithm (steep = steepest descent minimization)
- emtol = 1000.0 ; Stop minimization when the maximum force < 1000.0 kJ/mol/nm
- emstep = 0.01 ; Minimization step size (nm)
- nsteps = 50000 ; Maximum number of (minimization) steps to perform
In this particular example, the method used to run the **energy minimization** is the default **steepest descent**, but the **maximum force** is set to **500 kJ/mol/nm**, and the **maximum number of steps** to perform (if the maximum force is not reached) is limited to **5,000 steps**.
```
# Grompp: Creating portable binary run file for mdrun
from biobb_md.gromacs.grompp import Grompp
# Create prop dict and inputs/outputs
output_gppmin_tpr = pdbCode+'_gppmin.tpr'
prop = {
'mdp':{
'type': 'minimization',
'emtol':'500',
'nsteps':'5000'
}
}
# Create and launch bb
Grompp(input_gro_path=output_genion_gro,
input_top_zip_path=output_genion_top_zip,
output_tpr_path=output_gppmin_tpr,
properties=prop).launch()
```
<a id="emStep2"></a>
### Step 2: Running Energy Minimization
Running **energy minimization** using the **tpr file** generated in the previous step.
```
# Mdrun: Running minimization
from biobb_md.gromacs.mdrun import Mdrun
# Create prop dict and inputs/outputs
output_min_trr = pdbCode+'_min.trr'
output_min_gro = pdbCode+'_min.gro'
output_min_edr = pdbCode+'_min.edr'
output_min_log = pdbCode+'_min.log'
# Create and launch bb
Mdrun(input_tpr_path=output_gppmin_tpr,
output_trr_path=output_min_trr,
output_gro_path=output_min_gro,
output_edr_path=output_min_edr,
output_log_path=output_min_log).launch()
```
<a id="emStep3"></a>
### Step 3: Checking Energy Minimization results
Checking **energy minimization** results. Plotting **potential energy** by time during the minimization process.
```
# GMXEnergy: Getting system energy by time
from biobb_analysis.gromacs.gmx_energy import GMXEnergy
# Create prop dict and inputs/outputs
output_min_ene_xvg = pdbCode+'_min_ene.xvg'
prop = {
'terms': ["Potential"]
}
# Create and launch bb
GMXEnergy(input_energy_path=output_min_edr,
output_xvg_path=output_min_ene_xvg,
properties=prop).launch()
import plotly
import plotly.graph_objs as go
# Read data from file and filter out energy values higher than 1000 kJ/mol
with open(output_min_ene_xvg,'r') as energy_file:
x,y = map(
list,
zip(*[
(float(line.split()[0]),float(line.split()[1]))
for line in energy_file
if not line.startswith(("#","@"))
if float(line.split()[1]) < 1000
])
)
plotly.offline.init_notebook_mode(connected=True)
fig = {
"data": [go.Scatter(x=x, y=y)],
"layout": go.Layout(title="Energy Minimization",
xaxis=dict(title = "Energy Minimization Step"),
                        yaxis=dict(title = "Potential Energy (kJ/mol)")
)
}
plotly.offline.iplot(fig)
```
<img src='plot1.png' />
<a id="nvt"></a>
***
## Equilibrate the system (NVT)
Equilibrate the **protein system** in **NVT ensemble** (constant Number of particles, Volume and Temperature). Protein **heavy atoms** will be restrained using position restraining forces: movement is permitted, but only after overcoming a substantial energy penalty. The utility of position restraints is that they allow us to equilibrate our solvent around our protein, without the added variable of structural changes in the protein.
- [Step 1](#eqNVTStep1): Creating portable binary run file for system equilibration
- [Step 2](#eqNVTStep2): Equilibrate the **protein system** with **NVT** ensemble.
- [Step 3](#eqNVTStep3): Checking **NVT Equilibration** results. Plotting **system temperature** by time during the **NVT equilibration** process.
***
**Building Blocks** used:
- [Grompp](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.grompp) from **biobb_md.gromacs.grompp**
- [Mdrun](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.mdrun) from **biobb_md.gromacs.mdrun**
- [GMXEnergy](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_energy) from **biobb_analysis.gromacs.gmx_energy**
***
<a id="eqNVTStep1"></a>
### Step 1: Creating portable binary run file for system equilibration (NVT)
The **nvt** type of the **molecular dynamics parameters (mdp) property** contains the main default parameters to run an **NVT equilibration** with **protein restraints** (see [GROMACS mdp options](http://manual.gromacs.org/documentation/2018/user-guide/mdp-options.html)):
- Define = -DPOSRES
- integrator = md
- dt = 0.002
- nsteps = 5000
- pcoupl = no
- gen_vel = yes
- gen_temp = 300
- gen_seed = -1
In this particular example, the default parameters will be used: **md** integrator algorithm, a **step size** of **2fs**, **5,000 equilibration steps** with the protein **heavy atoms restrained**, and a temperature of **300K**.
*Please note that for the sake of time this tutorial is only running 10ps of NVT equilibration, whereas in the [original example](http://www.mdtutorials.com/gmx/lysozyme/06_equil.html) the simulated time was 100ps.*
```
# Grompp: Creating portable binary run file for NVT Equilibration
from biobb_md.gromacs.grompp import Grompp
# Create prop dict and inputs/outputs
output_gppnvt_tpr = pdbCode+'_gppnvt.tpr'
prop = {
'mdp':{
'type': 'nvt',
'nsteps': 5000,
'dt': 0.002,
'define': '-DPOSRES',
#'tc_grps': "DNA Water_and_ions" # NOTE: uncomment this line if working with DNA
}
}
# Create and launch bb
Grompp(input_gro_path=output_min_gro,
input_top_zip_path=output_genion_top_zip,
output_tpr_path=output_gppnvt_tpr,
properties=prop).launch()
```
<a id="eqNVTStep2"></a>
### Step 2: Running NVT equilibration
```
# Mdrun: Running Equilibration NVT
from biobb_md.gromacs.mdrun import Mdrun
# Create prop dict and inputs/outputs
output_nvt_trr = pdbCode+'_nvt.trr'
output_nvt_gro = pdbCode+'_nvt.gro'
output_nvt_edr = pdbCode+'_nvt.edr'
output_nvt_log = pdbCode+'_nvt.log'
output_nvt_cpt = pdbCode+'_nvt.cpt'
# Create and launch bb
Mdrun(input_tpr_path=output_gppnvt_tpr,
output_trr_path=output_nvt_trr,
output_gro_path=output_nvt_gro,
output_edr_path=output_nvt_edr,
output_log_path=output_nvt_log,
output_cpt_path=output_nvt_cpt).launch()
```
<a id="eqNVTStep3"></a>
### Step 3: Checking NVT Equilibration results
Checking **NVT Equilibration** results. Plotting **system temperature** by time during the NVT equilibration process.
```
# GMXEnergy: Getting system temperature by time during NVT Equilibration
from biobb_analysis.gromacs.gmx_energy import GMXEnergy
# Create prop dict and inputs/outputs
output_nvt_temp_xvg = pdbCode+'_nvt_temp.xvg'
prop = {
'terms': ["Temperature"]
}
# Create and launch bb
GMXEnergy(input_energy_path=output_nvt_edr,
output_xvg_path=output_nvt_temp_xvg,
properties=prop).launch()
import plotly
import plotly.graph_objs as go
# Read temperature data from file
with open(output_nvt_temp_xvg,'r') as temperature_file:
x,y = map(
list,
zip(*[
(float(line.split()[0]),float(line.split()[1]))
for line in temperature_file
if not line.startswith(("#","@"))
])
)
plotly.offline.init_notebook_mode(connected=True)
fig = {
"data": [go.Scatter(x=x, y=y)],
"layout": go.Layout(title="Temperature during NVT Equilibration",
xaxis=dict(title = "Time (ps)"),
yaxis=dict(title = "Temperature (K)")
)
}
plotly.offline.iplot(fig)
```
<img src='plot2.png' />
<a id="npt"></a>
***
## Equilibrate the system (NPT)
Equilibrate the **protein system** in **NPT** ensemble (constant Number of particles, Pressure and Temperature).
- [Step 1](#eqNPTStep1): Creating portable binary run file for system equilibration
- [Step 2](#eqNPTStep2): Equilibrate the **protein system** with **NPT** ensemble.
- [Step 3](#eqNPTStep3): Checking **NPT Equilibration** results. Plotting **system pressure and density** by time during the **NPT equilibration** process.
***
**Building Blocks** used:
- [Grompp](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.grompp) from **biobb_md.gromacs.grompp**
- [Mdrun](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.mdrun) from **biobb_md.gromacs.mdrun**
- [GMXEnergy](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_energy) from **biobb_analysis.gromacs.gmx_energy**
***
<a id="eqNPTStep1"></a>
### Step 1: Creating portable binary run file for system equilibration (NPT)
The **npt** type of the **molecular dynamics parameters (mdp) property** contains the main default parameters to run an **NPT equilibration** with **protein restraints** (see [GROMACS mdp options](http://manual.gromacs.org/documentation/2018/user-guide/mdp-options.html)):
- Define = -DPOSRES
- integrator = md
- dt = 0.002
- nsteps = 5000
- pcoupl = Parrinello-Rahman
- pcoupltype = isotropic
- tau_p = 1.0
- ref_p = 1.0
- compressibility = 4.5e-5
- refcoord_scaling = com
- gen_vel = no
In this particular example, the default parameters will be used: **md** integrator algorithm, a **time step** of **2fs**, **5,000 equilibration steps** with the protein **heavy atoms restrained**, and a Parrinello-Rahman **pressure coupling** algorithm.
*Please note that for the sake of time this tutorial is only running 10ps of NPT equilibration, whereas in the [original example](http://www.mdtutorials.com/gmx/lysozyme/07_equil2.html) the simulated time was 100ps.*
```
# Grompp: Creating portable binary run file for NPT System Equilibration
from biobb_md.gromacs.grompp import Grompp
# Create prop dict and inputs/outputs
output_gppnpt_tpr = pdbCode+'_gppnpt.tpr'
prop = {
'mdp':{
'type': 'npt',
'nsteps':'5000',
#'tc_grps': "DNA Water_and_ions" # NOTE: uncomment this line if working with DNA
}
}
# Create and launch bb
Grompp(input_gro_path=output_nvt_gro,
input_top_zip_path=output_genion_top_zip,
output_tpr_path=output_gppnpt_tpr,
input_cpt_path=output_nvt_cpt,
properties=prop).launch()
```
<a id="eqNPTStep2"></a>
### Step 2: Running NPT equilibration
```
# Mdrun: Running NPT System Equilibration
from biobb_md.gromacs.mdrun import Mdrun
# Create prop dict and inputs/outputs
output_npt_trr = pdbCode+'_npt.trr'
output_npt_gro = pdbCode+'_npt.gro'
output_npt_edr = pdbCode+'_npt.edr'
output_npt_log = pdbCode+'_npt.log'
output_npt_cpt = pdbCode+'_npt.cpt'
# Create and launch bb
Mdrun(input_tpr_path=output_gppnpt_tpr,
output_trr_path=output_npt_trr,
output_gro_path=output_npt_gro,
output_edr_path=output_npt_edr,
output_log_path=output_npt_log,
output_cpt_path=output_npt_cpt).launch()
```
<a id="eqNPTStep3"></a>
### Step 3: Checking NPT Equilibration results
Checking **NPT Equilibration** results. Plotting **system pressure and density** by time during the **NPT equilibration** process.
```
# GMXEnergy: Getting system pressure and density by time during NPT Equilibration
from biobb_analysis.gromacs.gmx_energy import GMXEnergy
# Create prop dict and inputs/outputs
output_npt_pd_xvg = pdbCode+'_npt_PD.xvg'
prop = {
'terms': ["Pressure","Density"]
}
# Create and launch bb
GMXEnergy(input_energy_path=output_npt_edr,
output_xvg_path=output_npt_pd_xvg,
properties=prop).launch()
import plotly
from plotly import subplots
import plotly.graph_objs as go
# Read pressure and density data from file
with open(output_npt_pd_xvg,'r') as pd_file:
x,y,z = map(
list,
zip(*[
(float(line.split()[0]),float(line.split()[1]),float(line.split()[2]))
for line in pd_file
if not line.startswith(("#","@"))
])
)
plotly.offline.init_notebook_mode(connected=True)
trace1 = go.Scatter(
x=x,y=y
)
trace2 = go.Scatter(
x=x,y=z
)
fig = subplots.make_subplots(rows=1, cols=2, print_grid=False)
fig.append_trace(trace1, 1, 1)
fig.append_trace(trace2, 1, 2)
fig['layout']['xaxis1'].update(title='Time (ps)')
fig['layout']['xaxis2'].update(title='Time (ps)')
fig['layout']['yaxis1'].update(title='Pressure (bar)')
fig['layout']['yaxis2'].update(title='Density (kg m^-3)')
fig['layout'].update(title='Pressure and Density during NPT Equilibration')
fig['layout'].update(showlegend=False)
plotly.offline.iplot(fig)
```
<img src='plot3.png' />
<a id="free"></a>
***
## Free Molecular Dynamics Simulation
Upon completion of the **two equilibration phases (NVT and NPT)**, the system is now well-equilibrated at the desired temperature and pressure. The **position restraints** can now be released. The last step of the **protein** MD setup is a short, **free MD simulation**, to ensure the robustness of the system.
- [Step 1](#mdStep1): Creating portable binary run file to run a **free MD simulation**.
- [Step 2](#mdStep2): Run short MD simulation of the **protein system**.
- [Step 3](#mdStep3): Checking results for the final step of the setup process, the **free MD run**. Plotting **Root Mean Square deviation (RMSd)** and **Radius of Gyration (Rgyr)** by time during the **free MD run** step.
***
**Building Blocks** used:
- [Grompp](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.grompp) from **biobb_md.gromacs.grompp**
- [Mdrun](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.mdrun) from **biobb_md.gromacs.mdrun**
- [GMXRms](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_rms) from **biobb_analysis.gromacs.gmx_rms**
- [GMXRgyr](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_rgyr) from **biobb_analysis.gromacs.gmx_rgyr**
***
<a id="mdStep1"></a>
### Step 1: Creating portable binary run file to run a free MD simulation
The **free** type of the **molecular dynamics parameters (mdp) property** contains the main default parameters to run a **free MD simulation** (see [GROMACS mdp options](http://manual.gromacs.org/documentation/2018/user-guide/mdp-options.html)):
- integrator = md
- dt = 0.002 (ps)
- nsteps = 50000
In this particular example, the default parameters will be used: **md** integrator algorithm, a **time step** of **2fs**, and a total of **50,000 md steps** (100ps).
*Please note that for the sake of time this tutorial is only running 100ps of free MD, whereas in the [original example](http://www.mdtutorials.com/gmx/lysozyme/08_MD.html) the simulated time was 1ns (1000ps).*
```
# Grompp: Creating portable binary run file for mdrun
from biobb_md.gromacs.grompp import Grompp
# Create prop dict and inputs/outputs
output_gppmd_tpr = pdbCode+'_gppmd.tpr'
prop = {
'mdp':{
'type': 'free',
'nsteps':'50000',
#'tc_grps': "DNA Water_and_ions" # NOTE: uncomment this line if working with DNA
}
}
# Create and launch bb
Grompp(input_gro_path=output_npt_gro,
input_top_zip_path=output_genion_top_zip,
output_tpr_path=output_gppmd_tpr,
input_cpt_path=output_npt_cpt,
properties=prop).launch()
```
<a id="mdStep2"></a>
### Step 2: Running short free MD simulation
```
# Mdrun: Running free dynamics
from biobb_md.gromacs.mdrun import Mdrun
# Create prop dict and inputs/outputs
output_md_trr = pdbCode+'_md.trr'
output_md_gro = pdbCode+'_md.gro'
output_md_edr = pdbCode+'_md.edr'
output_md_log = pdbCode+'_md.log'
output_md_cpt = pdbCode+'_md.cpt'
# Create and launch bb
Mdrun(input_tpr_path=output_gppmd_tpr,
output_trr_path=output_md_trr,
output_gro_path=output_md_gro,
output_edr_path=output_md_edr,
output_log_path=output_md_log,
output_cpt_path=output_md_cpt).launch()
```
<a id="mdStep3"></a>
### Step 3: Checking free MD simulation results
Checking results for the final step of the setup process, the **free MD run**. Plotting **Root Mean Square deviation (RMSd)** and **Radius of Gyration (Rgyr)** by time during the **free MD run** step. **RMSd** against the **experimental structure** (input structure of the pipeline) and against the **minimized and equilibrated structure** (output structure of the NPT equilibration step).
```
# GMXRms: Computing Root Mean Square deviation to analyse structural stability
# RMSd against minimized and equilibrated snapshot (backbone atoms)
from biobb_analysis.gromacs.gmx_rms import GMXRms
# Create prop dict and inputs/outputs
output_rms_first = pdbCode+'_rms_first.xvg'
prop = {
'selection': 'Backbone',
#'selection': 'non-Water'
}
# Create and launch bb
GMXRms(input_structure_path=output_gppmd_tpr,
input_traj_path=output_md_trr,
output_xvg_path=output_rms_first,
properties=prop).launch()
# GMXRms: Computing Root Mean Square deviation to analyse structural stability
# RMSd against experimental structure (backbone atoms)
from biobb_analysis.gromacs.gmx_rms import GMXRms
# Create prop dict and inputs/outputs
output_rms_exp = pdbCode+'_rms_exp.xvg'
prop = {
'selection': 'Backbone',
#'selection': 'non-Water'
}
# Create and launch bb
GMXRms(input_structure_path=output_gppmin_tpr,
input_traj_path=output_md_trr,
output_xvg_path=output_rms_exp,
properties=prop).launch()
import plotly
import plotly.graph_objs as go
# Read RMS vs first snapshot data from file
with open(output_rms_first,'r') as rms_first_file:
x,y = map(
list,
zip(*[
(float(line.split()[0]),float(line.split()[1]))
for line in rms_first_file
if not line.startswith(("#","@"))
])
)
# Read RMS vs experimental structure data from file
with open(output_rms_exp,'r') as rms_exp_file:
x2,y2 = map(
list,
zip(*[
(float(line.split()[0]),float(line.split()[1]))
for line in rms_exp_file
if not line.startswith(("#","@"))
])
)
trace1 = go.Scatter(
x = x,
y = y,
name = 'RMSd vs first'
)
trace2 = go.Scatter(
    x = x2,
y = y2,
name = 'RMSd vs exp'
)
data = [trace1, trace2]
plotly.offline.init_notebook_mode(connected=True)
fig = {
"data": data,
"layout": go.Layout(title="RMSd during free MD Simulation",
xaxis=dict(title = "Time (ps)"),
yaxis=dict(title = "RMSd (nm)")
)
}
plotly.offline.iplot(fig)
```
<img src='plot4.png' />
```
# GMXRgyr: Computing Radius of Gyration to measure the protein compactness during the free MD simulation
from biobb_analysis.gromacs.gmx_rgyr import GMXRgyr
# Create prop dict and inputs/outputs
output_rgyr = pdbCode+'_rgyr.xvg'
prop = {
'selection': 'Backbone'
}
# Create and launch bb
GMXRgyr(input_structure_path=output_gppmin_tpr,
input_traj_path=output_md_trr,
output_xvg_path=output_rgyr,
properties=prop).launch()
import plotly
import plotly.graph_objs as go
# Read Rgyr data from file
with open(output_rgyr,'r') as rgyr_file:
x,y = map(
list,
zip(*[
(float(line.split()[0]),float(line.split()[1]))
for line in rgyr_file
if not line.startswith(("#","@"))
])
)
plotly.offline.init_notebook_mode(connected=True)
fig = {
"data": [go.Scatter(x=x, y=y)],
"layout": go.Layout(title="Radius of Gyration",
xaxis=dict(title = "Time (ps)"),
yaxis=dict(title = "Rgyr (nm)")
)
}
plotly.offline.iplot(fig)
```
<img src='plot5.png' />
<a id="post"></a>
***
## Post-processing and Visualizing resulting 3D trajectory
Post-processing and Visualizing the **protein system** MD setup **resulting trajectory** using **NGL**
- [Step 1](#ppStep1): *Imaging* the resulting trajectory, **stripping out water molecules and ions** and **correcting periodicity issues**.
- [Step 2](#ppStep2): Generating a *dry* structure, **removing water molecules and ions** from the final snapshot of the MD setup pipeline.
- [Step 3](#ppStep3): Visualizing the *imaged* trajectory using the *dry* structure as a **topology**.
***
**Building Blocks** used:
- [GMXImage](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_image) from **biobb_analysis.gromacs.gmx_image**
- [GMXTrjConvStr](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_trjconv_str) from **biobb_analysis.gromacs.gmx_trjconv_str**
***
<a id="ppStep1"></a>
### Step 1: *Imaging* the resulting trajectory.
Stripping out **water molecules and ions** and **correcting periodicity issues**
```
# GMXImage: "Imaging" the resulting trajectory
# Removing water molecules and ions from the resulting structure
from biobb_analysis.gromacs.gmx_image import GMXImage
# Create prop dict and inputs/outputs
output_imaged_traj = pdbCode+'_imaged_traj.trr'
prop = {
'center_selection': 'Protein',
'output_selection': 'Protein',
'pbc' : 'mol',
'center' : True
}
# Create and launch bb
GMXImage(input_traj_path=output_md_trr,
input_top_path=output_gppmd_tpr,
output_traj_path=output_imaged_traj,
properties=prop).launch()
```
<a id="ppStep2"></a>
### Step 2: Generating the output *dry* structure.
**Removing water molecules and ions** from the resulting structure
```
# GMXTrjConvStr: Converting and/or manipulating a structure
# Removing water molecules and ions from the resulting structure
# The "dry" structure will be used as a topology to visualize
# the "imaged dry" trajectory generated in the previous step.
from biobb_analysis.gromacs.gmx_trjconv_str import GMXTrjConvStr
# Create prop dict and inputs/outputs
output_dry_gro = pdbCode+'_md_dry.gro'
prop = {
'selection': 'Protein'
}
# Create and launch bb
GMXTrjConvStr(input_structure_path=output_md_gro,
input_top_path=output_gppmd_tpr,
output_str_path=output_dry_gro,
properties=prop).launch()
```
<a id="ppStep3"></a>
### Step 3: Visualizing the generated dehydrated trajectory.
Using the **imaged trajectory** (output of the [Post-processing step 1](#ppStep1)) with the **dry structure** (output of the [Post-processing step 2](#ppStep2)) as a topology.
```
# Show trajectory
view = nglview.show_simpletraj(nglview.SimpletrajTrajectory(output_imaged_traj, output_dry_gro), gui=True)
view
```
<img src='trajectory.gif'></img>
<a id="output"></a>
## Output files
Important **Output files** generated:
- {{output_md_gro}}: **Final structure** (snapshot) of the MD setup protocol.
- {{output_md_trr}}: **Final trajectory** of the MD setup protocol.
- {{output_md_cpt}}: **Final checkpoint file**, with information about the state of the simulation. It can be used to **restart** or **continue** an MD simulation.
- {{output_gppmd_tpr}}: **Final tpr file**, GROMACS portable binary run input file. This file contains the starting structure of the **MD setup free MD simulation step**, together with the molecular topology and all the simulation parameters. It can be used to **extend** the simulation.
- {{output_genion_top_zip}}: **Final topology** of the MD system. It is a compressed zip file including a **topology file** (.top) and a set of auxiliary **include topology** files (.itp).
**Analysis** (MD setup check) output files generated:
- {{output_rms_first}}: **Root Mean Square deviation (RMSd)** against **minimized and equilibrated structure** of the final **free MD run step**.
- {{output_rms_exp}}: **Root Mean Square deviation (RMSd)** against **experimental structure** of the final **free MD run step**.
- {{output_rgyr}}: **Radius of Gyration** of the final **free MD run step** of the **setup pipeline**.
***
<a id="questions"></a>
## Questions & Comments
Questions, issues, suggestions and comments are really welcome!
* GitHub issues:
* [https://github.com/bioexcel/biobb](https://github.com/bioexcel/biobb)
* BioExcel forum:
* [https://ask.bioexcel.eu/c/BioExcel-Building-Blocks-library](https://ask.bioexcel.eu/c/BioExcel-Building-Blocks-library)
|
github_jupyter
|
```
git clone https://github.com/bioexcel/biobb_wf_md_setup.git
cd biobb_wf_md_setup
conda env create -f conda_env/environment.yml
conda activate biobb_MDsetup_tutorial
jupyter-nbextension enable --py --user widgetsnbextension
jupyter-nbextension enable --py --user nglview
jupyter-notebook biobb_wf_md_setup/notebooks/biobb_MDsetup_tutorial.ipynb
```
***
## Pipeline steps
1. [Input Parameters](#input)
2. [Fetching PDB Structure](#fetch)
3. [Fix Protein Structure](#fix)
4. [Create Protein System Topology](#top)
5. [Create Solvent Box](#box)
6. [Fill the Box with Water Molecules](#water)
7. [Adding Ions](#ions)
8. [Energetically Minimize the System](#min)
9. [Equilibrate the System (NVT)](#nvt)
10. [Equilibrate the System (NPT)](#npt)
11. [Free Molecular Dynamics Simulation](#free)
12. [Post-processing and Visualizing Resulting 3D Trajectory](#post)
13. [Output Files](#output)
14. [Questions & Comments](#questions)
***
<img src="https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png" alt="Bioexcel2 logo"
title="Bioexcel2 logo" width="400" />
***
<a id="input"></a>
## Input parameters
**Input parameters** needed:
- **pdbCode**: PDB code of the protein structure (e.g. 1AKI)
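As a minimal sketch, the input cell only needs to define this value (1AKI is just the example suggested above):
```
# Input parameter (sketch): PDB code used to name all files generated by the workflow
pdbCode = "1AKI"
```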
<a id="fetch"></a>
***
## Fetching PDB structure
Downloading **PDB structure** with the **protein molecule** from the RCSB PDB database.<br>
Alternatively, a **PDB file** can be used as starting structure. <br>
***
**Building Blocks** used:
- [Pdb](https://biobb-io.readthedocs.io/en/latest/api.html#module-api.pdb) from **biobb_io.api.pdb**
***
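The code cell for this step is not shown in this excerpt; a minimal sketch following the same building-block pattern used in the rest of the workflow could look like the block below. The `output_pdb_path` argument and the `pdb_code` property name are assumptions based on the biobb_io documentation linked above.
```
# Pdb: Download the PDB structure from the RCSB PDB database (sketch)
from biobb_io.api.pdb import Pdb
# Create prop dict and inputs/outputs (names assumed)
downloaded_pdb = pdbCode + '.pdb'
prop = {
    'pdb_code': pdbCode
}
# Create and launch bb
Pdb(output_pdb_path=downloaded_pdb,
    properties=prop).launch()
```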
<a id="vis3D"></a>
### Visualizing 3D structure
Visualizing the downloaded/given **PDB structure** using **NGL**:
<img src='ngl1.png'></img>
<a id="fix"></a>
***
## Fix protein structure
**Checking** and **fixing** (if needed) the protein structure:<br>
- **Modeling** **missing side-chain atoms**, modifying incorrect **amide assignments**, choosing **alternative locations**.<br>
- **Checking** for missing **backbone atoms**, **heteroatoms**, **modified residues** and possible **atomic clashes**.
***
**Building Blocks** used:
- [FixSideChain](https://biobb-model.readthedocs.io/en/latest/model.html#module-model.fix_side_chain) from **biobb_model.model.fix_side_chain**
***
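A minimal sketch of this step, mirroring the building-block pattern used elsewhere in this workflow; the argument names and the `downloaded_pdb` input from the previous sketch are assumptions.
```
# FixSideChain: Model missing side-chain atoms in the downloaded structure (sketch)
from biobb_model.model.fix_side_chain import FixSideChain
# Create inputs/outputs (names assumed)
fixed_pdb = pdbCode + '_fixed.pdb'
# Create and launch bb
FixSideChain(input_pdb_path=downloaded_pdb,
             output_pdb_path=fixed_pdb).launch()
```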
### Visualizing 3D structure
Visualizing the fixed **PDB structure** using **NGL**. In this particular example, the checking step didn't find any issue to be solved, so there is no difference between the original structure and the fixed one.
<img src='ngl2.png'></img>
<a id="top"></a>
***
## Create protein system topology
**Building GROMACS topology** corresponding to the protein structure.<br>
Force field used in this tutorial is [**amber99sb-ildn**](https://dx.doi.org/10.1002%2Fprot.22711): AMBER **parm99** force field with **corrections on backbone** (sb) and **side-chain torsion potentials** (ildn). Water molecules type used in this tutorial is [**spc/e**](https://pubs.acs.org/doi/abs/10.1021/j100308a038).<br>
Adding **hydrogen atoms** if missing. Automatically identifying **disulfide bridges**. <br>
Generating two output files:
- **GROMACS structure** (gro file)
- **GROMACS topology** ZIP compressed file containing:
- *GROMACS topology top file* (top file)
- *GROMACS position restraint file/s* (itp file/s)
***
**Building Blocks** used:
- [Pdb2gmx](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.pdb2gmx) from **biobb_md.gromacs.pdb2gmx**
***
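A sketch of this step with the Pdb2gmx building block. The output variable names match those used by the following steps of the workflow; the property names (`force_field`, `water_type`) and the `fixed_pdb` input from the previous sketch are assumptions.
```
# Pdb2gmx: Generate the GROMACS structure and topology from the fixed PDB (sketch)
from biobb_md.gromacs.pdb2gmx import Pdb2gmx
# Create prop dict and inputs/outputs
output_pdb2gmx_gro = pdbCode + '_pdb2gmx.gro'
output_pdb2gmx_top_zip = pdbCode + '_pdb2gmx_top.zip'
prop = {
    'force_field': 'amber99sb-ildn',  # force field named above (property name assumed)
    'water_type': 'spce'              # water model named above (property name assumed)
}
# Create and launch bb
Pdb2gmx(input_pdb_path=fixed_pdb,
        output_gro_path=output_pdb2gmx_gro,
        output_top_zip_path=output_pdb2gmx_top_zip,
        properties=prop).launch()
```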
### Visualizing 3D structure
Visualizing the generated **GRO structure** using **NGL**. Note that **hydrogen atoms** were added to the structure by the **pdb2gmx GROMACS tool** when generating the **topology**.
<img src='ngl3.png'></img>
<a id="box"></a>
***
## Create solvent box
Define the unit cell for the **protein structure MD system** to fill it with water molecules.<br>
A **cubic box** is used to define the unit cell, with a **distance from the protein to the box edge of 1.0 nm**. The protein is **centered in the box**.
***
**Building Blocks** used:
- [Editconf](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.editconf) from **biobb_md.gromacs.editconf**
***
<a id="water"></a>
***
## Fill the box with water molecules
Fill the unit cell for the **protein structure system** with water molecules.<br>
The solvent type used is the default **Simple Point Charge water (SPC)**, a generic equilibrated 3-point solvent model.
***
**Building Blocks** used:
- [Solvate](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.solvate) from **biobb_md.gromacs.solvate**
***
### Visualizing 3D structure
Visualizing the **protein system** with the newly added **solvent box** using **NGL**.<br> Note the **cubic box** filled with **water molecules** surrounding the **protein structure**, which is **centered** right in the middle of the cube.
<img src='ngl4.png'></img>
<a id="ions"></a>
***
## Adding ions
Add ions to neutralize the **protein structure** charge
- [Step 1](#ionsStep1): Creating portable binary run file for ion generation
- [Step 2](#ionsStep2): Adding ions to **neutralize** the system
***
**Building Blocks** used:
- [Grompp](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.grompp) from **biobb_md.gromacs.grompp**
- [Genion](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.genion) from **biobb_md.gromacs.genion**
***
<a id="ionsStep1"></a>
### Step 1: Creating portable binary run file for ion generation
A simple set of **energy minimization** molecular dynamics parameters (mdp) will be used to generate the portable binary run file for **ion generation**, although **any legitimate combination of parameters** could be used in this step.
<a id="ionsStep2"></a>
### Step 2: Adding ions to neutralize the system
Replace **solvent molecules** with **ions** to **neutralize** the system.
### Visualizing 3D structure
Visualizing the **neutralized protein system** with the newly added **ions** using **NGL**
<img src='ngl5.png'></img>
<a id="min"></a>
***
## Energetically minimize the system
Energetically minimize the **protein system** until it reaches a desired potential energy.
- [Step 1](#emStep1): Creating portable binary run file for energy minimization
- [Step 2](#emStep2): Energetically minimize the **system** until the maximum force falls below 500 kJ mol-1 nm-1.
- [Step 3](#emStep3): Checking **energy minimization** results. Plotting energy by time during the **minimization** process.
***
**Building Blocks** used:
- [Grompp](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.grompp) from **biobb_md.gromacs.grompp**
- [Mdrun](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.mdrun) from **biobb_md.gromacs.mdrun**
- [GMXEnergy](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_energy) from **biobb_analysis.gromacs.gmx_energy**
***
<a id="emStep1"></a>
### Step 1: Creating portable binary run file for energy minimization
The **minimization** type of the **molecular dynamics parameters (mdp) property** contains the main default parameters to run an **energy minimization**:
- integrator = steep ; Algorithm (steep = steepest descent minimization)
- emtol = 1000.0 ; Stop minimization when the maximum force < 1000.0 kJ/mol/nm
- emstep = 0.01 ; Minimization step size (nm)
- nsteps = 50000 ; Maximum number of (minimization) steps to perform
In this particular example, the method used to run the **energy minimization** is the default **steepest descent**, but the **maximum force** is set to **500 kJ/mol/nm**, and the **maximum number of steps** to perform (if the maximum force is not reached) is limited to **5,000 steps**.
<a id="emStep2"></a>
### Step 2: Running Energy Minimization
Running **energy minimization** using the **tpr file** generated in the previous step.
<a id="emStep3"></a>
### Step 3: Checking Energy Minimization results
Checking **energy minimization** results. Plotting **potential energy** by time during the minimization process.
<img src='plot1.png' />
<a id="nvt"></a>
***
## Equilibrate the system (NVT)
Equilibrate the **protein system** in **NVT ensemble** (constant Number of particles, Volume and Temperature). Protein **heavy atoms** will be restrained using position restraining forces: movement is permitted, but only after overcoming a substantial energy penalty. The utility of position restraints is that they allow us to equilibrate our solvent around our protein, without the added variable of structural changes in the protein.
- [Step 1](#eqNVTStep1): Creating portable binary run file for system equilibration
- [Step 2](#eqNVTStep2): Equilibrate the **protein system** with **NVT** ensemble.
- [Step 3](#eqNVTStep3): Checking **NVT Equilibration** results. Plotting **system temperature** by time during the **NVT equilibration** process.
***
**Building Blocks** used:
- [Grompp](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.grompp) from **biobb_md.gromacs.grompp**
- [Mdrun](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.mdrun) from **biobb_md.gromacs.mdrun**
- [GMXEnergy](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_energy) from **biobb_analysis.gromacs.gmx_energy**
***
<a id="eqNVTStep1"></a>
### Step 1: Creating portable binary run file for system equilibration (NVT)
The **nvt** type of the **molecular dynamics parameters (mdp) property** contains the main default parameters to run an **NVT equilibration** with **protein restraints** (see [GROMACS mdp options](http://manual.gromacs.org/documentation/2018/user-guide/mdp-options.html)):
- Define = -DPOSRES
- integrator = md
- dt = 0.002
- nsteps = 5000
- pcoupl = no
- gen_vel = yes
- gen_temp = 300
- gen_seed = -1
In this particular example, the default parameters will be used: **md** integrator algorithm, a **step size** of **2fs**, **5,000 equilibration steps** with the protein **heavy atoms restrained**, and a temperature of **300K**.
*Please note that for the sake of time this tutorial is only running 10ps of NVT equilibration, whereas in the [original example](http://www.mdtutorials.com/gmx/lysozyme/06_equil.html) the simulated time was 100ps.*
<a id="eqNVTStep2"></a>
### Step 2: Running NVT equilibration
<a id="eqNVTStep3"></a>
### Step 3: Checking NVT Equilibration results
Checking **NVT Equilibration** results. Plotting **system temperature** by time during the NVT equilibration process.
<img src='plot2.png' />
<a id="npt"></a>
***
## Equilibrate the system (NPT)
Equilibrate the **protein system** in **NPT** ensemble (constant Number of particles, Pressure and Temperature).
- [Step 1](#eqNPTStep1): Creating portable binary run file for system equilibration
- [Step 2](#eqNPTStep2): Equilibrate the **protein system** with **NPT** ensemble.
- [Step 3](#eqNPTStep3): Checking **NPT Equilibration** results. Plotting **system pressure and density** by time during the **NPT equilibration** process.
***
**Building Blocks** used:
- [Grompp](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.grompp) from **biobb_md.gromacs.grompp**
- [Mdrun](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.mdrun) from **biobb_md.gromacs.mdrun**
- [GMXEnergy](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_energy) from **biobb_analysis.gromacs.gmx_energy**
***
<a id="eqNPTStep1"></a>
### Step 1: Creating portable binary run file for system equilibration (NPT)
The **npt** type of the **molecular dynamics parameters (mdp) property** contains the main default parameters to run an **NPT equilibration** with **protein restraints** (see [GROMACS mdp options](http://manual.gromacs.org/documentation/2018/user-guide/mdp-options.html)):
- Define = -DPOSRES
- integrator = md
- dt = 0.002
- nsteps = 5000
- pcoupl = Parrinello-Rahman
- pcoupltype = isotropic
- tau_p = 1.0
- ref_p = 1.0
- compressibility = 4.5e-5
- refcoord_scaling = com
- gen_vel = no
In this particular example, the default parameters will be used: **md** integrator algorithm, a **time step** of **2fs**, **5,000 equilibration steps** with the protein **heavy atoms restrained**, and a Parrinello-Rahman **pressure coupling** algorithm.
*Please note that for the sake of time this tutorial is only running 10ps of NPT equilibration, whereas in the [original example](http://www.mdtutorials.com/gmx/lysozyme/07_equil2.html) the simulated time was 100ps.*
<a id="eqNPTStep2"></a>
### Step 2: Running NPT equilibration
<a id="eqNPTStep3"></a>
### Step 3: Checking NPT Equilibration results
Checking **NPT Equilibration** results. Plotting **system pressure and density** by time during the **NPT equilibration** process.
<img src='plot3.png' />
<a id="free"></a>
***
## Free Molecular Dynamics Simulation
Upon completion of the **two equilibration phases (NVT and NPT)**, the system is now well-equilibrated at the desired temperature and pressure. The **position restraints** can now be released. The last step of the **protein** MD setup is a short, **free MD simulation**, to ensure the robustness of the system.
- [Step 1](#mdStep1): Creating portable binary run file to run a **free MD simulation**.
- [Step 2](#mdStep2): Run short MD simulation of the **protein system**.
- [Step 3](#mdStep3): Checking results for the final step of the setup process, the **free MD run**. Plotting **Root Mean Square deviation (RMSd)** and **Radius of Gyration (Rgyr)** by time during the **free MD run** step.
***
**Building Blocks** used:
- [Grompp](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.grompp) from **biobb_md.gromacs.grompp**
- [Mdrun](https://biobb-md.readthedocs.io/en/latest/gromacs.html#module-gromacs.mdrun) from **biobb_md.gromacs.mdrun**
- [GMXRms](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_rms) from **biobb_analysis.gromacs.gmx_rms**
- [GMXRgyr](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_rgyr) from **biobb_analysis.gromacs.gmx_rgyr**
***
<a id="mdStep1"></a>
### Step 1: Creating portable binary run file to run a free MD simulation
The **free** type of the **molecular dynamics parameters (mdp) property** contains the main default parameters to run a **free MD simulation** (see [GROMACS mdp options](http://manual.gromacs.org/documentation/2018/user-guide/mdp-options.html)):
- integrator = md
- dt = 0.002 (ps)
- nsteps = 50000
In this particular example, the default parameters will be used: **md** integrator algorithm, a **time step** of **2fs**, and a total of **50,000 md steps** (100ps).
*Please note that for the sake of time this tutorial is only running 100ps of free MD, whereas in the [original example](http://www.mdtutorials.com/gmx/lysozyme/08_MD.html) the simulated time was 1ns (1000ps).*
<a id="mdStep2"></a>
### Step 2: Running short free MD simulation
<a id="mdStep3"></a>
### Step 3: Checking free MD simulation results
Checking results for the final step of the setup process, the **free MD run**. Plotting **Root Mean Square deviation (RMSd)** and **Radius of Gyration (Rgyr)** by time during the **free MD run** step. **RMSd** against the **experimental structure** (input structure of the pipeline) and against the **minimized and equilibrated structure** (output structure of the NPT equilibration step).
<img src='plot4.png' />
<img src='plot5.png' />
<a id="post"></a>
***
## Post-processing and Visualizing resulting 3D trajectory
Post-processing and Visualizing the **protein system** MD setup **resulting trajectory** using **NGL**
- [Step 1](#ppStep1): *Imaging* the resulting trajectory, **stripping out water molecules and ions** and **correcting periodicity issues**.
- [Step 2](#ppStep2): Generating a *dry* structure, **removing water molecules and ions** from the final snapshot of the MD setup pipeline.
- [Step 3](#ppStep3): Visualizing the *imaged* trajectory using the *dry* structure as a **topology**.
***
**Building Blocks** used:
- [GMXImage](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_image) from **biobb_analysis.gromacs.gmx_image**
- [GMXTrjConvStr](https://biobb-analysis.readthedocs.io/en/latest/gromacs.html#module-gromacs.gmx_trjconv_str) from **biobb_analysis.gromacs.gmx_trjconv_str**
***
<a id="ppStep1"></a>
### Step 1: *Imaging* the resulting trajectory.
Stripping out **water molecules and ions** and **correcting periodicity issues**
<a id="ppStep2"></a>
### Step 2: Generating the output *dry* structure.
**Removing water molecules and ions** from the resulting structure
<a id="ppStep3"></a>
### Step 3: Visualizing the generated dehydrated trajectory.
Using the **imaged trajectory** (output of the [Post-processing step 1](#ppStep1)) with the **dry structure** (output of the [Post-processing step 2](#ppStep2)) as a topology.
| 0.879043 | 0.986058 |
```
# Load packages
import tensorflow as tf
import pandas as pd
from tensorflow import keras
import numpy as np
import pandas as pd
import os
import scipy as scp
import scipy.stats as scps
import pickle
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
from datetime import datetime
import time
import scipy.optimize as scp_opt
from scipy.stats import mode
from statsmodels.distributions.empirical_distribution import ECDF
# Unused direct path kept for reference; the training history is loaded via model_paths_home.yaml below
# pd.read_csv('/Users/afengler/OneDrive/project_nn_likelihoods/data/kde/ddm/keras_models/dnnregressor_ddm_09_26_20_08_52_12/training_history.csv')
import yaml
model = 'full_ddm2'
with open('model_paths_home.yaml') as tmp_file:
    file_path = yaml.safe_load(tmp_file)[model]
data = pd.read_csv(file_path + '/training_history.csv', index_col = 0)
data
file_path
data
def loss_curves(loss_panda = [],
save = True,
machine = 'home',
model = 'ddm',
plot_format = 'svg'):
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['svg.fonttype'] = 'none'
sns.set(style = 'white',
palette = 'muted',
color_codes = True,
font_scale = 2)
fig, ax = plt.subplots(1, 1,
figsize = (10, 10),
sharex = False,
sharey = False)
my_suptitle = fig.suptitle('DDM LIKELIHOOD EVALUATION ON SINGLE DATASETS')
sns.despine(right = True)
xs = [i for i in range(len(loss_panda['loss'].values))]
# HUBER
ax.plot(xs, loss_panda['loss'],
label = 'Huber-train',
color = 'black',
alpha = 0.5)
ax.plot(xs, loss_panda['val_loss'],
label = 'Huber-val',
color = 'black',
linestyle = 'dashed')
# MSE
ax.plot(xs, loss_panda['mse'],
color = 'blue',
label = 'MSE-train',
alpha = 0.5)
ax.plot(xs, loss_panda['val_mse'],
color = 'blue',
label = 'MSE-val',
linestyle = 'dashed')
ax.set_xlabel('Epoch', size = 28)
ax.set_ylabel('Loss', size = 28)
ax.tick_params(labelsize = 24)
ax.legend()
if save == True:
if machine == 'home':
fig_dir = "/users/afengler/OneDrive/git_repos/nn_likelihoods/figures/mlp/loss_curves/"
if not os.path.isdir(fig_dir):
os.mkdir(fig_dir)
if plot_format == 'png':
plt.savefig(fig_dir + '/' + 'loss_curve_mlp_' + model + '.png',
dpi = 300,
transparent = False,
bbox_inches = 'tight',
bbox_extra_artists = [my_suptitle])
if plot_format == 'svg':
plt.savefig(fig_dir + '/' + 'loss_curve_mlp_' + model + '.svg',
format = 'svg',
transparent = True,
bbox_inches = 'tight',
bbox_extra_artists = [my_suptitle],
frameon = False)
#plt.close()
plt.show()
loss_curves(loss_panda = data,
model = model)
def forward_timings(mean_timings = None, # list [timings_cnn_1024, timings_cnn_4096, timings_mlp_1024, timings_mlp_4096]
machine = 'home',
save = False,
plot_format = 'svg'):
matplotlib.rcParams['text.usetex'] = True
#matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
sns.set(style = "white",
palette = "muted",
color_codes = True,
font_scale = 2)
fig, ax = plt.subplots(1, 1,
figsize = (10, 10),
sharex = False,
sharey = False)
my_suptitle = fig.suptitle('DDM LIKELIHOOD EVALUATION ON SINGLE DATASETS',
fontsize = 32)
sns.despine(right = True)
for col in mean_timings.keys():
if col == 'nsamples' or col == 'lognsamples':
continue
if col == 'CNN':
color_tmp = 'red'
if col == 'MLP':
color_tmp = 'blue'
if col == 'MLP-Numpy':
color_tmp = 'aqua'
continue
if col == '100k-sim':
color_tmp = 'aqua'
if col == 'Navarro Fuss':
color_tmp = 'violet'
ax.scatter(mean_timings['lognsamples'].values,
mean_timings[col].values,
color = color_tmp,
marker = 's',
label = col)
ax.set_xlabel('log2 Data Set Size')
ax.set_ylabel('Time in ms')
# plt.scatter(mean_timings['nsamples'].values, mean_timings[col].values,
# label = col)
ax.legend()
# ax.scatter(x_cnn_4096, np.array([i for i in range(len(plot_vals['CNN-4096'].keys()))]) + offset_list[3],
# c = 'red',
# marker = 's',
# alpha = .5,
# label = 'CNN-1024')
if save == True:
if machine == 'home':
fig_dir = "/users/afengler/OneDrive/git_repos/nn_likelihoods/figures/timings/"
if not os.path.isdir(fig_dir):
os.mkdir(fig_dir)
if plot_format == 'png':
plt.savefig(fig_dir + '/' + 'timings_batch_1' + '.png',
dpi = 300,
transparent = False,
bbox_inches = 'tight',
bbox_extra_artists = [my_suptitle])
if plot_format == 'svg':
plt.savefig(fig_dir + '/' + 'timings_batch_1' + '.svg',
format = 'svg',
transparent = True,
bbox_inches = 'tight',
bbox_extra_artists = [my_suptitle],
frameon = False)
plt.close()
# Show
return #plt.show(block = False)
else:
return plt.show()
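# Example usage (sketch): `forward_timings` expects a DataFrame with 'nsamples' and
# 'lognsamples' columns plus one column of mean evaluation times (ms) per method
# (e.g. 'CNN', 'MLP', 'Navarro Fuss'); the variable name below is hypothetical.
# forward_timings(mean_timings = mean_timings_df, machine = 'home', save = False)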
import boundary_functions as bf
for a in np.linspace(3.0, 3.5, 100):
out = bf.weibull_cdf(t = np.linspace(0.01, 5 , 1000), alpha = a, beta = 3)
plt.plot(np.linspace(0, 5 , 1000), out, alpha = 0.1, c = 'black')
plt.plot(np.linspace(0, 5, 1000), - out, alpha = 0.1, c = 'black')
plt.plot(np.linspace(0, 10 , 1000), out)
# np.exp(-np.power(np.divide(t, beta), alpha))  # stray expression commented out: t, beta and alpha are not defined at this point
```
|
github_jupyter
|
# Load packages
import tensorflow as tf
import pandas as pd
from tensorflow import keras
import numpy as np
import pandas as pd
import os
import scipy as scp
import scipy.stats as scps
import pickle
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
from datetime import datetime
import time
import scipy.optimize as scp_opt
from scipy.stats import mode
from statsmodels.distributions.empirical_distribution import ECDF
# Unused direct path kept for reference; the training history is loaded via model_paths_home.yaml below
# pd.read_csv('/Users/afengler/OneDrive/project_nn_likelihoods/data/kde/ddm/keras_models/dnnregressor_ddm_09_26_20_08_52_12/training_history.csv')
import yaml
model = 'full_ddm2'
with open('model_paths_home.yaml') as tmp_file:
    file_path = yaml.safe_load(tmp_file)[model]
data = pd.read_csv(file_path + '/training_history.csv', index_col = 0)
data
file_path
data
def loss_curves(loss_panda = [],
save = True,
machine = 'home',
model = 'ddm',
plot_format = 'svg'):
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['svg.fonttype'] = 'none'
sns.set(style = 'white',
palette = 'muted',
color_codes = True,
font_scale = 2)
fig, ax = plt.subplots(1, 1,
figsize = (10, 10),
sharex = False,
sharey = False)
my_suptitle = fig.suptitle('DDM LIKELIHOOD EVALUATION ON SINGLE DATASETS')
sns.despine(right = True)
xs = [i for i in range(len(loss_panda['loss'].values))]
# HUBER
ax.plot(xs, loss_panda['loss'],
label = 'Huber-train',
color = 'black',
alpha = 0.5)
ax.plot(xs, loss_panda['val_loss'],
label = 'Huber-val',
color = 'black',
linestyle = 'dashed')
# MSE
ax.plot(xs, loss_panda['mse'],
color = 'blue',
label = 'MSE-train',
alpha = 0.5)
ax.plot(xs, loss_panda['val_mse'],
color = 'blue',
label = 'MSE-val',
linestyle = 'dashed')
ax.set_xlabel('Epoch', size = 28)
ax.set_ylabel('Loss', size = 28)
ax.tick_params(labelsize = 24)
ax.legend()
if save == True:
if machine == 'home':
fig_dir = "/users/afengler/OneDrive/git_repos/nn_likelihoods/figures/mlp/loss_curves/"
if not os.path.isdir(fig_dir):
os.mkdir(fig_dir)
if plot_format == 'png':
plt.savefig(fig_dir + '/' + 'loss_curve_mlp_' + model + '.png',
dpi = 300,
transparent = False,
bbox_inches = 'tight',
bbox_extra_artists = [my_suptitle])
if plot_format == 'svg':
plt.savefig(fig_dir + '/' + 'loss_curve_mlp_' + model + '.svg',
format = 'svg',
transparent = True,
bbox_inches = 'tight',
bbox_extra_artists = [my_suptitle],
frameon = False)
#plt.close()
plt.show()
loss_curves(loss_panda = data,
model = model)
def forward_timings(mean_timings = None, # list [timings_cnn_1024, timings_cnn_4096, timings_mlp_1024, timings_mlp_4096]
machine = 'home',
save = False,
plot_format = 'svg'):
matplotlib.rcParams['text.usetex'] = True
#matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
sns.set(style = "white",
palette = "muted",
color_codes = True,
font_scale = 2)
fig, ax = plt.subplots(1, 1,
figsize = (10, 10),
sharex = False,
sharey = False)
my_suptitle = fig.suptitle('DDM LIKELIHOOD EVALUATION ON SINGLE DATASETS',
fontsize = 32)
sns.despine(right = True)
for col in mean_timings.keys():
if col == 'nsamples' or col == 'lognsamples':
continue
if col == 'CNN':
color_tmp = 'red'
if col == 'MLP':
color_tmp = 'blue'
if col == 'MLP-Numpy':
color_tmp = 'aqua'
continue
if col == '100k-sim':
color_tmp = 'aqua'
if col == 'Navarro Fuss':
color_tmp = 'violet'
ax.scatter(mean_timings['lognsamples'].values,
mean_timings[col].values,
color = color_tmp,
marker = 's',
label = col)
ax.set_xlabel('log2 Data Set Size')
ax.set_ylabel('Time in ms')
# plt.scatter(mean_timings['nsamples'].values, mean_timings[col].values,
# label = col)
ax.legend()
# ax.scatter(x_cnn_4096, np.array([i for i in range(len(plot_vals['CNN-4096'].keys()))]) + offset_list[3],
# c = 'red',
# marker = 's',
# alpha = .5,
# label = 'CNN-1024')
if save == True:
if machine == 'home':
fig_dir = "/users/afengler/OneDrive/git_repos/nn_likelihoods/figures/timings/"
if not os.path.isdir(fig_dir):
os.mkdir(fig_dir)
if plot_format == 'png':
plt.savefig(fig_dir + '/' + 'timings_batch_1' + '.png',
dpi = 300,
transparent = False,
bbox_inches = 'tight',
bbox_extra_artists = [my_suptitle])
if plot_format == 'svg':
plt.savefig(fig_dir + '/' + 'timings_batch_1' + '.svg',
format = 'svg',
transparent = True,
bbox_inches = 'tight',
bbox_extra_artists = [my_suptitle],
frameon = False)
plt.close()
# Show
return #plt.show(block = False)
else:
return plt.show()
import boundary_functions as bf
for a in np.linspace(3.0, 3.5, 100):
out = bf.weibull_cdf(t = np.linspace(0.01, 5 , 1000), alpha = a, beta = 3)
plt.plot(np.linspace(0, 5 , 1000), out, alpha = 0.1, c = 'black')
plt.plot(np.linspace(0, 5, 1000), - out, alpha = 0.1, c = 'black')
plt.plot(np.linspace(0, 10 , 1000), out)
# np.exp(-np.power(np.divide(t, beta), alpha))  # stray expression commented out: t, beta and alpha are not defined at this point
| 0.552057 | 0.240685 |
Recurrent Neural Networks
===
A recurrent neural network (RNN) is a class of neural network that excels when your data can be treated as a sequence - such as text, music, speech recognition, connected handwriting, or data over a time period.
RNNs can analyse or predict a word based on the previous words in a sentence - they allow a connection between previous information and current information.
This exercise looks at implementing an LSTM RNN to generate new characters after learning from a large sample of text. LSTMs are a special type of RNN which dramatically improve the model's ability to connect previous data to current data where there is a long gap.
We will train an RNN model using a novel written by H. G. Wells - The Time Machine.
Step 1
------
Let's start by loading our libraries looking at our text file. This might take a few minutes.
```
# Run this!
suppressMessages(install.packages("keras"))
suppressMessages(install.packages("tokenizers"))
suppressMessages(install.packages("stringr"))
suppressMessages(library(keras))
suppressMessages(library(readr))
suppressMessages(library(stringr))
suppressMessages(library(purrr))
suppressMessages(library(tokenizers))
suppressMessages(install_keras())
path <- file.path("Data/time-edit.txt")
# Let's have a look at the text
read_lines(path)
```
Expected output:
```
The Time Traveller (for so it will be convenient to speak of him) was expounding a recondite matter to us. His pale grey eyes shone and twinkled, and his usually pale face was flushed and animated.
text length: 174201 characters
unique characters: 39
```
Step 2
-----
Next we'll divide the text into sequences of 35 characters.
Then for each sequence we'll build a training example - the character that immediately follows the sequence is the correct output the network should learn to predict.
### In the cell below replace:
#### 1. `<textSequenceLength>` with `35`
#### 2. `<pathToDataset>` with `path`
#### then __run the code__.
```
###
# REPLACE <textSequenceLength> WITH 35
###
maxlen <- <textSequenceLength>
###
# This makes all the characters lower case, and separates the individual characters from whole words.
###
# REPLACE <pathToDataset> WITH path
###
text <- read_lines(<pathToDataset>) %>%
###
str_to_lower() %>%
str_c(collapse = "\n") %>%
tokenize_characters(strip_non_alphanum = FALSE, simplify = TRUE)
print(sprintf("Total length: %d", length(text)))
chars <- text %>%
unique() %>%
sort()
print(sprintf("Total chars: %d", length(chars)))
```
Expected output:
`"Total length: 174666"`
`"Total chars: 29"`
#### Replace the 3 `<maximumLength>`'s with `maxlen`
```
###
# REPLACE ALL THE <maximumLength>'s WITH maxlen
###
dataset <- map(
seq(1, length(text) - <maximumLength> - 1, by = 6),
~list(sentence = text[.x:(.x + <maximumLength> - 1)], next_char = text[.x + <maximumLength>])
)
###
dataset <- transpose(dataset)
x <- array(0, dim = c(length(dataset$sentence), maxlen, length(chars)))
y <- array(0, dim = c(length(dataset$sentence), length(chars)))
for(i in 1:length(dataset$sentence)){
x[i,,] <- sapply(chars, function(x){
as.integer(x == dataset$sentence[[i]])
})
y[i,] <- as.integer(chars == dataset$next_char[[i]])
}
```
Step 3
------
Let's build our model, using a single LSTM layer of 64 units. We'll keep the model simple for now, so that training does not take too long.
#### Replace the `<layerSize>` with 64, and run the cell.
```
model <- keras_model_sequential()
###
# REPLACE <layerSize> WITH 64
###
model %>%
layer_lstm(<layerSize>, input_shape = c(maxlen, length(chars))) %>%
###
layer_dense(length(chars)) %>%
layer_activation("softmax")
model %>% compile(
loss = "categorical_crossentropy",
optimizer = "Adam"
)
```
We'll just get a few helper functions ready, run the cell below to prepare them.
```
# Run this cell!
sample_mod <- function(preds, temperature = 1){
preds <- log(preds)/temperature
exp_preds <- exp(preds)
preds <- exp_preds/sum(exp(preds))
rmultinom(1, 1, preds) %>%
as.integer() %>%
which.max()
}
on_epoch_end <- function(epoch, logs) {
cat(sprintf("epoch: %02d ---------------\n\n", epoch))
diversity <- 0.5
generated <- ""
cat(sprintf("diversity: %f ---------------\n\n", diversity))
start_index <- sample(1:(length(text) - maxlen), size = 1)
sentence <- text[start_index:(start_index + maxlen - 1)]
for(i in 1:400){
x <- sapply(chars, function(x){
as.integer(x == sentence)
})
x <- array_reshape(x, c(1, dim(x)))
preds <- predict(model, x)
next_index <- sample_mod(preds, diversity)
next_char <- chars[next_index]
generated <- str_c(generated, next_char, collapse = "")
sentence <- c(sentence[-1], next_char)
}
cat(generated)
cat("\n\n")
}
```
Ready to go. The next cell will train the model.
Training RNNs on low compute takes a long time. We'll only build a small one for now. If you want to leave this model training for longer, change the number of epochs to a larger number.
#### Replace the `<epochNumber>` with 3 and run the cell.
```
# This will take a little while...
print_callback <- callback_lambda(on_epoch_end = on_epoch_end)
history <- model %>% fit(
x, y,
batch_size = 1,
###
# REPLACE <epochNumber> WITH 3
###
epochs = <epochNumber>,
###
callbacks = print_callback
)
```
The output won't appear to be very good. But then, this dataset is small, and we have trained it only for a short time using a rather small RNN. Feel free to increase the number of epochs and leave it training for a long time if you want to see better results.
We could improve our model by:
* Having a larger training set.
* Increasing the number of LSTM units.
* Training it for longer
* Experimenting with different activation functions, optimizers, etc.
Conclusion
--------
We have trained a lightweight RNN from scratch that learns to predict the next character in a sequence of text.
|
github_jupyter
|
# Run this!
suppressMessages(install.packages("keras"))
suppressMessages(install.packages("tokenizers"))
suppressMessages(install.packages("stringr"))
suppressMessages(library(keras))
suppressMessages(library(readr))
suppressMessages(library(stringr))
suppressMessages(library(purrr))
suppressMessages(library(tokenizers))
suppressMessages(install_keras())
path <- file.path("Data/time-edit.txt")
# Let's have a look at the text
read_lines(path)
Step 2
-----
Next we'll divide the text into sequences of 35 characters.
Then for each sequence we'll make a training set - the following character will be the correct output for the test set.
### In the cell below replace:
#### 1. `<textSequenceLength>` with `35`
#### 2. `<pathToDataset>` with `path`
#### then __run the code__.
Expected output:
`"Total length: 174666"`
`"Total chars: 29"`
#### Replace the 3 `<maximumLength>`'s with `maxlen`
Step 3
------
Let's build our model, using a single LSTM layer of 64 units. We'll keep the model simple for now, so that training does not take too long.
#### Replace the `<layerSize>` with 64, and run the cell.
We'll just get a few helper functions ready, run the cell below to prepare them.
Ready to go. The next cell will train the model.
Training RNNs on limited compute takes a long time, so we'll only build a small model for now. If you want to leave this model training for longer, change the number of epochs to a larger number.
#### Replace the `<epochNumber>` with 3 and run the cell.
| 0.654674 | 0.960694 |
```
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, padding=1)    # in 1x28x28  -> out 32x28x28
        self.bn0 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 32, 3, padding=1)    # in 32x28x28 -> out 32x28x28
        self.bn1 = nn.BatchNorm2d(32)
        self.pool1 = nn.MaxPool2d(2, 2)                 # 32x28x28 -> 32x14x14
        self.drop = nn.Dropout(0.2)
        self.conv3_1 = nn.Conv2d(32, 16, 1, padding=1)  # 1x1 conv; padding=1 grows 14x14 to 16x16
        self.bn2 = nn.BatchNorm2d(16)
        self.conv3 = nn.Conv2d(16, 16, 3, padding=1)    # in 16x16x16 -> out 16x16x16
        self.bn3 = nn.BatchNorm2d(16)
        self.conv4 = nn.Conv2d(16, 16, 3, padding=1)    # in 16x16x16 -> out 16x16x16
        self.bn4 = nn.BatchNorm2d(16)
        self.pool2 = nn.MaxPool2d(2, 2)                 # 16x16x16 -> 16x8x8
self.conv5 = nn.Conv2d(16, 16, 3, padding=1)
self.conv6 = nn.Conv2d(16, 10, 3, padding=1)
#self.conv5 = nn.Conv2d(256, 512, 3)#input -256x7x7 OUtput 512x5x5 RF 30x30
#self.conv6 = nn.Conv2d(512, 1024, 3)#input 512x5x5 OUtput 1024x3x3 RF 32x32
#self.conv7 = nn.Conv2d(1024, 10, 3)#input 1024x3x3 OUtput 10x1x1 RF 34x34
#self.fc1 = nn.Linear(64,128)
#self.fc2 = nn.Linear(128,10)
def forward(self, x):
x = self.drop(self.pool1(self.bn1(F.relu(self.conv2(self.bn0(F.relu(self.conv1(x))))))))
#print(x.shape)
x = self.drop(self.pool2(self.bn4(F.relu(self.conv4(self.bn3(F.relu(self.conv3(self.bn2(F.relu(self.conv3_1(x)))))))))))
#print(x.shape)
x = F.adaptive_avg_pool2d(self.conv6(F.relu(self.conv5(x))), output_size=1)
#print(x.shape)
#x=x.view(-1, 64)
#x = self.fc2(F.relu(self.fc1(x)))
'''
x = F.relu(self.conv6(F.relu(self.conv5(x))))
x = self.conv7(x)
'''
x = x.view(-1, 10)
return F.log_softmax(x)
!pip install torchsummary
from torchsummary import summary
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
model = Net().to(device)
summary(model, input_size=(1, 28, 28))
torch.manual_seed(1)
batch_size = 64
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **kwargs)
from tqdm import tqdm
def train(model, device, train_loader, optimizer, epoch):
model.train()
pbar = tqdm(train_loader)
for batch_idx, (data, target) in enumerate(pbar):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
pbar.set_description(desc= f'loss={loss.item()} batch_id={batch_idx}')
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for epoch in range(1, 20):
train(model, device, train_loader, optimizer, epoch)
test(model, device, test_loader)
```
|
github_jupyter
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, padding=1) #input 1x28x28 OUtput32x28x28 RF 3x3
self.bn0=nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1)#input 32x28x28 OUtput 64x28x28 RF 5x5
self.bn1=nn.BatchNorm2d(32)
self.pool1 = nn.MaxPool2d(2, 2)#input 64x28x28 OUtput 64x14x14 RF 10x10
self.drop=nn.Dropout(0.2)
self.conv3_1 = nn.Conv2d(32, 16, 1, padding=1)
self.bn2=nn.BatchNorm2d(16)
self.conv3 = nn.Conv2d(16, 16, 3, padding=1)#input 64x14x14 OUtput 128x14x14 RF 12x12
self.bn3=nn.BatchNorm2d(16)
self.conv4 = nn.Conv2d(16, 16, 3, padding=1)#input 128x14x14 OUtput 256x14x14 RF 14x14
self.bn4=nn.BatchNorm2d(16)
self.pool2 = nn.MaxPool2d(2, 2)#input -256x14x14 OUtput256x7x7 RF 28x28
self.conv5 = nn.Conv2d(16, 16, 3, padding=1)
self.conv6 = nn.Conv2d(16, 10, 3, padding=1)
#self.conv5 = nn.Conv2d(256, 512, 3)#input -256x7x7 OUtput 512x5x5 RF 30x30
#self.conv6 = nn.Conv2d(512, 1024, 3)#input 512x5x5 OUtput 1024x3x3 RF 32x32
#self.conv7 = nn.Conv2d(1024, 10, 3)#input 1024x3x3 OUtput 10x1x1 RF 34x34
#self.fc1 = nn.Linear(64,128)
#self.fc2 = nn.Linear(128,10)
def forward(self, x):
x = self.drop(self.pool1(self.bn1(F.relu(self.conv2(self.bn0(F.relu(self.conv1(x))))))))
#print(x.shape)
x = self.drop(self.pool2(self.bn4(F.relu(self.conv4(self.bn3(F.relu(self.conv3(self.bn2(F.relu(self.conv3_1(x)))))))))))
#print(x.shape)
x = F.adaptive_avg_pool2d(self.conv6(F.relu(self.conv5(x))), output_size=1)
#print(x.shape)
#x=x.view(-1, 64)
#x = self.fc2(F.relu(self.fc1(x)))
'''
x = F.relu(self.conv6(F.relu(self.conv5(x))))
x = self.conv7(x)
'''
x = x.view(-1, 10)
return F.log_softmax(x)
!pip install torchsummary
from torchsummary import summary
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
model = Net().to(device)
summary(model, input_size=(1, 28, 28))
torch.manual_seed(1)
batch_size = 64
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **kwargs)
from tqdm import tqdm
def train(model, device, train_loader, optimizer, epoch):
model.train()
pbar = tqdm(train_loader)
for batch_idx, (data, target) in enumerate(pbar):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
pbar.set_description(desc= f'loss={loss.item()} batch_id={batch_idx}')
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for epoch in range(1, 20):
train(model, device, train_loader, optimizer, epoch)
test(model, device, test_loader)
| 0.934679 | 0.581986 |
# 20-05-14: Daily Data Practice
---
### Daily Practices
* Practice with the common DS/ML tools and processes
* HackerRank SQL or Packt SQL Data Analytics
* Try to hit benchmark accuracies with [UCI ML datasets](https://archive.ics.uci.edu/ml/index.php)
* Kaggle
* Hands-on ML with sklearn, Keras, and TensorFlow
* Read, code along, take notes
* _test yourself on the concepts_ — i.e. do all the chapter exercises
* [fast.ai course](https://course.fast.ai/)
* Packt Workshops
* Interviewing
* Behavioral questions and scenarios
* Business case walk-throughs
* Hot-seat DS-related topics for recall practice (under pressure)
* Meta Data: Review and write
* Focus on a topic, review notes and resources, write a blog post about it
* Job sourcing
* Networking
* LinkedIn
* Indeed
---
## Reading: Naked Statistics
It is my goal today to reinvigorate myself and start a new phase of my understanding of statistics.
> I will learn to love it!
* Describe data
* An important power of statistics is the ability to boil down large / complex numbers into a single number
* Inference is using data from the "known world" to make informed inferences about the "unknown world"
* Make better decisions
* Assess / classify risk
* Find and describe (hopefully meaningful) relationships
#### Descriptive Statistics
1. Find some sort of "middle" or central tendency
* Different summary stats are misleading - or can be - in different ways, under different circumstances
* Percentiles provide comparative / relative measure
> Absolute number, score, figure: has some sort of intrinsic meaning.
> Context is not necessarily needed.
> Relative value, figure: has meaning only in comparison to something else.
> Needs context.
2. Find the spread of the data with Standard Deviation
* Weights of people on an airplane vs. people running a marathon
* one will be more varied / spread, with a fatter distribution
* Normal distribution: bell shaped and symmetrical distribution around mean
| StDevs | % |
| --- | --- |
| 1 | 68.2 |
| 2 | 95.4 |
| 3 | 99.7 |
`% change = (new - old) / old`
* An index is an example of a summary stat
* Lots of complex data simplified
* The important benefit is that now I can rank things that may have been untenable before
> Done with Chapter 2
---
## Writing
> Focus on a topic or project, learn/review the concepts, and write a blog post about it
* Finished recreating Tech Term Velocity on the new site
* Started working on print(fiction)
---
### DS + ML Practice
* Pick a dataset and try to do X with it
* Try to hit benchmark accuracies with [UCI ML datasets](https://archive.ics.uci.edu/ml/index.php)
* Kaggle
* Practice with the common DS/ML tools and processes
* Hands-on ML with sklearn, Keras, and TensorFlow
* Machine learning flashcards
#### _The goal is to be comfortable explaining the entire process._
* Data access / sourcing, cleaning
* SQL
* Pandas
* Exploratory data analysis
* Data wrangling techniques and processes
* Inference
* Statistics
* Probability
* Visualization
* Modeling
* Implement + justify choice of model / algorithm
* Track performance + justify choice of metrics
* Communicate results as relevant to the goal
Started getting into Ch 3 of Hands-On ML: Classification. More specifically, I read the section on evaluation metrics such as the confusion matrix.
Just to review once again, hopefully drilling this in my head for good (with a quick sklearn sanity check below):
* Precision = True Positives / (True Positives + False Positives)
* PREcision -> accuracy in PREdicted values
* Recall (share of real positives found) = True Positives / (True Positives + False Negatives)
* REcall -> accuracy in REal values
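To make the formulas concrete, here is a quick sanity check with sklearn. The labels below are made up purely for illustration; the point is just that the hand-computed ratios match `precision_score` and `recall_score`.
```
from sklearn.metrics import confusion_matrix, precision_score, recall_score

# Made-up binary labels, only for checking the formulas above
y_true = [1, 0, 1, 1, 0, 1, 0, 0, 1, 0]
y_pred = [1, 0, 1, 0, 0, 1, 1, 0, 1, 0]

tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print("precision by hand:", tp / (tp + fp))
print("precision sklearn:", precision_score(y_true, y_pred))
print("recall by hand:   ", tp / (tp + fn))
print("recall sklearn:   ", recall_score(y_true, y_pred))
```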
|
github_jupyter
|
# 20-05-14: Daily Data Practice
---
### Daily Practices
* Practice with the common DS/ML tools and processes
* HackerRank SQL or Packt SQL Data Analytics
* Try to hit benchmark accuracies with [UCI ML datasets](https://archive.ics.uci.edu/ml/index.php)
* Kaggle
* Hands-on ML with sklearn, Keras, and TensorFlow
* Read, code along, take notes
* _test yourself on the concepts_ — i.e. do all the chapter exercises
* [fast.ai course](https://course.fast.ai/)
* Packt Workshops
* Interviewing
* Behavioral questions and scenarios
* Business case walk-throughs
* Hot-seat DS-related topics for recall practice (under pressure)
* Meta Data: Review and write
* Focus on a topic, review notes and resources, write a blog post about it
* Job sourcing
* Networking
* LinkedIn
* Indeed
---
## Reading: Naked Statistics
It is my goal today to reinvigorate myself and start a new phase of my understanding of statistics.
> I will learn to love it!
* Describe data
* An important power of statistics is the ability to boil down large / complex numbers into a single number
* Inference is using data from the "known world" to make informed inferences about the "unknown world"
* Make better decisions
* Asses / classify risk
* Find and describe (hopefully meaningful) relationships
#### Descriptive Statistics
1. Find some sort of "middle" or central tendency
* Different summary stats are misleading - or can be - in different ways, under different circumstances
* Percentiles provide comparative / relative measure
> Absolute number, score, figure: has some sort of intrinsic meaning.
> Context is not necessarily needed.
> Relative value, figure: has meaning only in comparison to something else.
> Needs context.
2. Find the spread of the data with Standard Deviation
* Weights of people on an airplane vs. people running a marathon
* one will be more varied / spread, with a fatter distribution
* Normal distribution: bell shaped and symmetrical distribution around mean
| StDevs | % |
| --- | --- |
| 1 | 68.2 |
| 2 | 95.4 |
| 3 | 99.7 |
`% change = (new - old) / old`
* An index is an example of a summary stat
* Lots of complex data simplified
* The important benefit is that now I can rank things that may have been untenable before
> Done with Chapter 2
---
## Writing
> Focus on a topic or project, learn/review the concepts, and write a blog post about it
* Finished recreating Tech Term Velocity on the new site
* Started working on print(fiction)
---
### DS + ML Practice
* Pick a dataset and try to do X with it
* Try to hit benchmark accuracies with [UCI ML datasets](https://archive.ics.uci.edu/ml/index.php)
* Kaggle
* Practice with the common DS/ML tools and processes
* Hands-on ML with sklearn, Keras, and TensorFlow
* Machine learning flashcards
#### _The goal is to be comfortable explaining the entire process._
* Data access / sourcing, cleaning
* SQL
* Pandas
* Exploratory data analysis
* Data wrangling techniques and processes
* Inference
* Statistics
* Probability
* Visualization
* Modeling
* Implement + justify choice of model / algorithm
* Track performance + justify choice of metrics
* Communicate results as relevant to the goal
Started getting into Ch 3 of Hands-On ML: Classification. More specifically, I read the section on evalutation metrics such as the confusion matrix.
Just to review once again, hopefully drilling this in my head for good:
* Precision = True Positives / (True Positives + False Positives)
* PREcision -> accuracy in PREdiected values
* Recall = Real positives = True Positives / (True Positives + False Negatives)
* REcall -> accuracy in REal values
| 0.847148 | 0.912981 |
# Recurrent Neural Network with 5 Input Features
In this notebook we will train a recurrent neural network using 5 input features to perform binary classification on our dataset.
## Setup
We first need to import the libraries and frameworks to help us create and train our model.
- Numpy will allow us to manipulate our input data
- Matplotlib gives us easy graphs to visualize performance
- Sklearn helps us with data normalization and shuffling
- Keras is our deep learning framework, which makes it easy to create and train our model
## Load Data
Here we load the numpy arrays that we created previously. We also need to reshape the data so that it can be processed as a sequence by the recurrent model. Let's check the dimensions to make sure they are correctly formatted.
## Data Preparation
The neural network will perform better during training if the data is normalized. We also want to shuffle the inputs to avoid training our model on a skewed ordering of the dataset.
## Training - Test Split
Ideally we would split our dataset into a training, validation and test set. For this example we will only use a training and validation set. The training set will have 3000 samples and the validation set will contain the remaining samples.
```
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import Normalizer
from sklearn.utils import shuffle
from tensorflow.keras import models
from tensorflow.keras import layers
X = np.load("../db/x3.npy")
Y = np.load("../db/y3.npy")
transformer = Normalizer().fit(X)
X = transformer.transform(X)  # scales each sample (row) to unit L2 norm
X, Y = shuffle(X, Y, random_state=0) # shuffle the samples
X = np.reshape(X, (3559, 6, 5), order='F')
# np.save("x_time.npy", x_time)
X_train = X[:3000]
Y_train = Y[:3000]
X_test = X[3000:]
Y_test = Y[3000:]
print("Input training tensor: " + str(X_train.shape))
print("Label training tensor: " + str(Y_train.shape) + "\n")
# 559 test/validation samples
print("Input validation tensor: " + str(X_test.shape))
print("Label validation tensor: " + str(Y_test.shape))
```
## Defining our model
Here we finally create our model, which in this case will be a 2-layer bidirectional LSTM with 64 recurrent units per layer.
We also choose an optimizer (RMSprop), a loss function (binary crossentropy) and our metric for evaluation (accuracy).
We can also take a look at the size of our model
```
rnn = models.Sequential()
rnn.add(layers.Bidirectional(layers.LSTM(64, return_sequences=True)))
rnn.add(layers.Bidirectional(layers.LSTM(64)))
rnn.add(layers.Dense(1, activation='sigmoid'))
rnn.compile(
optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy']
)
history = rnn.fit(
X_train,
Y_train,
epochs=200,
batch_size=30,
validation_data=(X_test,Y_test)
)
history_dict = history.history
rnn.summary()
print("Training accuracy: " + str(history_dict['accuracy'][-1]))
print("Training loss: " + str(history_dict['loss'][-1]) + "\n")
print("Validation accuracy: " + str(history_dict['val_accuracy'][-1]))
print("Validation loss: " + str(history_dict['val_loss'][-1]))
```
## Evaluating the Model
After training we get ~78% accuracy on our validation data. Looking at the loss, we can see that our model is indeed learning, but it begins to overfit after approximately 100 epochs.
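One way to stop training around the point where the validation loss stops improving is Keras's `EarlyStopping` callback. Below is a minimal sketch; the `patience` value is an arbitrary choice here, not something tuned for this dataset.
```
from tensorflow.keras.callbacks import EarlyStopping

early_stop = EarlyStopping(
    monitor='val_loss',           # watch the validation loss
    patience=10,                  # allow 10 epochs without improvement
    restore_best_weights=True,    # roll back to the best epoch
)

# Passed to fit() via callbacks=[early_stop], e.g.:
# history = rnn.fit(
#     X_train, Y_train,
#     epochs=200, batch_size=30,
#     validation_data=(X_test, Y_test),
#     callbacks=[early_stop],
# )
```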
```
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Losses')
plt.xlabel('Epoch')
plt.ylabel('Loss Evaluation')
plt.legend()
plt.show()
plt.clf()
loss_values = history_dict['accuracy']
val_loss_values = history_dict['val_accuracy']
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'bo', label='Training accuracy')
plt.plot(epochs, val_loss_values, 'b', label='Validation accuracy')
plt.title('Accuracy Evaluation')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
plt.clf()
```
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import Normalizer
from sklearn.utils import shuffle
from tensorflow.keras import models
from tensorflow.keras import layers
X = np.load("../db/x3.npy")
Y = np.load("../db/y3.npy")
transformer = Normalizer().fit(X)
X = transformer.transform(X) # normalizes data according to columns
X, Y = shuffle(X, Y, random_state=0) # shuffle the samples
X = np.reshape(X, (3559, 6, 5), order='F')
# np.save("x_time.npy", x_time)
X_train = X[:3000]
Y_train = Y[:3000]
X_test = X[3000:]
Y_test = Y[3000:]
print("Input training tensor: " + str(X_train.shape))
print("Label training tensor: " + str(Y_train.shape) + "\n")
# 559 test/validation samples
print("Input validation tensor: " + str(X_test.shape))
print("Label validation tensor: " + str(Y_test.shape))
rnn = models.Sequential()
rnn.add(layers.Bidirectional(layers.LSTM(64, return_sequences=True)))
rnn.add(layers.Bidirectional(layers.LSTM(64)))
rnn.add(layers.Dense(1, activation='sigmoid'))
rnn.compile(
optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy']
)
history = rnn.fit(
X_train,
Y_train,
epochs=200,
batch_size=30,
validation_data=(X_test,Y_test)
)
history_dict = history.history
rnn.summary()
print("Training accuracy: " + str(history_dict['accuracy'][-1]))
print("Training loss: " + str(history_dict['loss'][-1]) + "\n")
print("Validation accuracy: " + str(history_dict['val_accuracy'][-1]))
print("Validation loss: " + str(history_dict['val_loss'][-1]))
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Losses')
plt.xlabel('Epoch')
plt.ylabel('Loss Evaluation')
plt.legend()
plt.show()
plt.clf()
loss_values = history_dict['accuracy']
val_loss_values = history_dict['val_accuracy']
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, 'bo', label='Training accuracy')
plt.plot(epochs, val_loss_values, 'b', label='Validation accuracy')
plt.title('Accuracy Evaluation')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
plt.clf()
| 0.817829 | 0.988906 |
# Data Analyst's Toolbox: R and Python
> Institute of Political Science, NSYSU, 2020-12-25
Kuo, Yao-Jen <yaojenkuo@datainpoint.com> from [DATAINPOINT](https://www.datainpoint.com)
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
```
## TL; DR

Source: <https://memes.tw/>
## About me
## Teaching practical data science online/offline, for individuals
- [如何成為資料分析師:從問題解決到行動方案,Hahow 好學校](https://hahow.in/cr/dajourney)
- Visualization and modern data science, Adjunct Instructor, National Taiwan University
- Programming and business analytics, Adjunct Instructor, National Taiwan Normal University
- Python for data analysis, Instructor, Chunghwa Telecom Academy
- Python for data science, Machine learning from scratch, Senior Instructor, CSIE Training Program, National Taiwan University
## Also for commercial banking clients
- 2020 DBS Training Program
- 2019 HNCB Training Program
- 2017 ESUN Training Program
## Writing books
- [新手村逃脫!初心者的 Python 機器學習攻略](https://www.books.com.tw/products/0010867390)
- [進擊的資料科學](https://www.books.com.tw/products/0010827812)
- [輕鬆學習 R 語言](https://www.books.com.tw/products/0010835361)
## Writing blogs
- [Medium](https://medium.com/@tonykuoyj)
- [Substack](https://datainpoint.substack.com/about)
- [方格子](https://vocus.cc/user/@yaojenkuo)
## Before being an instructor
- Working experience
- Senior Data Analyst, Coupang Shanghai
- Analytical Consultant, SAS Taiwan
- Management Associate, Chinatrust Banking Corporation Taiwan
- Research Assistant, McKinsey & Company Taiwan
- Education
- MBA, National Taiwan University
- BA, National Taiwan University
## Loves running with a marathon PR of 2:43:12 at 2019 Seoul Marathon

Source: <https://giphy.com>
## What is data analysis
## The definition
> We generate questions about a specific topic, we search for answers by exploring, transforming, and modelling data referring to our topic. And then use what we've learned to refine questions or generate new questions.
Source: [R for Data Science](https://r4ds.had.co.nz/)
## Why data analysis
> It is now an era of data-driven strategic thinking, and is probably never coming back.
## The three means of persuasion that an orator must rely on
- Ethos
- Pathos
- Logos
Source: [Aristotle, Rhetoric](https://en.wikipedia.org/wiki/Rhetoric)
## It is a lot easier to persuade via ethos or pathos, but it takes time
However, logos can be easily acquired once it is a fact and can be proven. Hence, data analysis is often the express way to logos.
## Modern data analysis can be illustrated as the flow of data

Source: [R for Data Science](https://r4ds.had.co.nz/)
## The funny definitions

Source: <https://twitter.com/cdixon/status/428914681911070720/photo/1>

Source: <https://www.warnerbros.com/tv/friends/>

Source: <https://www.warnerbros.com/tv/friends/>
## The serious definition
> Modern data analysis involves applications and tools like importing, tidying, transformation, visualization, modeling, and communication. Surrounding all these is programming.
```
def get_value_ratios(df, col_name):
return df[col_name].value_counts() / df[col_name].value_counts().sum()
def get_checkbox_ratios(df, col_pattern):
channel_names = []
channel_checks = []
for col_name in df.columns:
if (col_pattern in col_name) and ('OTHER_TEXT' not in col_name):
channel_name = df[col_name].value_counts().index
channel_check = df[col_name].value_counts().values
if channel_name.size != 0:
channel_names.append(channel_name[0])
channel_checks.append(channel_check[0])
channel_counts = pd.Series(channel_checks, index=channel_names)
channel_ratios = channel_counts / channel_counts.sum()
channel_ratios_sorted = channel_ratios.sort_values(ascending=False)
return channel_ratios_sorted
def plot_ans_38(ans_38_ser):
fig = plt.figure()
ax = plt.axes()
ans_38_ser = ans_38_ser[::-1]
ax.barh(ans_38_ser.index, ans_38_ser.values, color=['c', 'c', 'c', 'c', 'c', 'r'])
ax.set_title("What is the primary tool that you use at work or school to analyze data?")
ax.set_xticks([0, 0.1, 0.2, 0.3, 0.4, 0.5])
ax.set_xticklabels(['0%', '10%', '20%', '30%', '40%', '50%'])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
plt.show()
def get_ax(ans_ser, plot_title, ax):
ax = ax
ans_ser = ans_ser[:10][::-1]
ax.barh(ans_ser.index, ans_ser.values, color=['c', 'c', 'c', 'c', 'c', 'c', 'c', 'r', 'r', 'r'])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.set_title(plot_title)
def plot_ans(ans_ser, question_str):
fig, axes = plt.subplots()
get_ax(ans_ser, question_str, axes)
plt.show()
survey = pd.read_csv("https://kaggle-ml-ds-survey.s3-ap-northeast-1.amazonaws.com/kaggle-survey-2020/kaggle_survey_2020_responses.csv", skiprows=[1])
ans_7 = get_checkbox_ratios(survey, 'Q7')
ans_8 = get_value_ratios(survey, 'Q8')
ans_38 = get_value_ratios(survey, 'Q38')
```
## Use programming language to analyze data
Let's review a question from [2020 Kaggle ML & DS Survey](https://www.kaggle.com/c/kaggle-survey-2020):
> What is the primary tool that you use at work or school to analyze data?
```
plot_ans_38(ans_38)
```
## It seems inevitable to write code in modern data analysis

Source: <https://giphy.com/>
## Simply put, we can choose any programming language as long as it is capable of
- Importing data
- Tidying data
- Transforming data
- Visualizing data
- Modeling data
- Communicating data
## Well, actually a lot of programming languages are capable of doing these
- Python
- R
- Julia
- Scala
- Matlab
- SAS
- ...etc.
## How to choose among so many alternatives?
- The philosophy of "Eating a watermelon".
- The full support of scientific computing.
- Our own objectives.
## The philosophy of "Eating a watermelon"
Let's review another 2 questions from [2020 Kaggle ML & DS Survey](https://www.kaggle.com/c/kaggle-survey-2020):
- What programming languages do you use on a regular basis?
- What programming language would you recommend an aspiring data scientist to learn first?
```
plot_ans(ans_7, "What programming languages do you use on a regular basis?")
plot_ans(ans_8, "What programming language would you recommend \n an aspiring data scientist to learn first?")
```
## R and Python in Stack Overflow Trends
<https://insights.stackoverflow.com/trends?tags=python%2Cr%2Cjulia%2Cscala%2Cmatlab%2Csas>
## The full support of scientific computing
- Does the language support vectorization?
- Does the language support various data format?
- Does the language support visualization?
## Both R and Python support vectorization
- R uses built-in `vector` and `matrix`.
- Python uses a third-party `ndarray` (a minimal sketch follows below).
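A minimal NumPy sketch of what "vectorization" buys us: the arithmetic below operates on whole arrays at once, with no explicit loop (the numbers are made up).
```
import numpy as np

monthly_sales = np.array([120, 98, 143, 110])   # made-up figures
growth = np.array([1.05, 1.10, 0.95, 1.20])

# element-wise multiplication over the whole array in one expression
print(monthly_sales * growth)
```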
## Both R and Python support various data format
- R uses
- built-in named `list` to support key-value storage
- built-in `data.frame` to support tabular data
- Python uses
- built-in `dict` to support key-value storage
- third-party `DataFrame` to support tabular data
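On the Python side of the list above, a minimal sketch of the built-in `dict` and the third-party `DataFrame` (the vote counts are made up):
```
import pandas as pd

# key-value storage with a built-in dict
village = {"county": "高雄市", "town": "鼓山區", "village": "桃源里"}

# tabular data with a third-party DataFrame (made-up vote counts)
df = pd.DataFrame({
    "number": ["1", "2", "3"],
    "votes": [100, 250, 400],
})

print(village["village"])
print(df)
```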
## Both R and Python support visualization
- R uses
- built-in basic plotting system to support static plotting
- third-party `ggplot2` to support high-end static plotting
- third-party `shiny` to support dynamic plotting
- Python uses
- third-party `matplotlib` to support static plotting
- third-party `seaborn` to support high-end static plotting
- third-party `plotly` to support dynamic plotting
## Last but not least, it depends on our own objectives
- Specific or general-purpose?
- Functional or object-oriented?
- ...etc.
## We will form our own preferences once we start coding

Source: <https://giphy.com>
## Let's write some codes to analyze data
## Bringing up a topic
> Which village's count best predicts the overall election result? The uncannily accurate 「章魚里」 ("octopus villages") will tell you. In every election there are a few villages whose returns closely resemble the nationwide outcome, so they are widely watched as key precincts when the ballots are counted.
Source: <https://www.cw.com.tw/article/5093012>
## We can generate some questions regarding this topic
- How to define 「章魚里」?
- Can we find out 「章魚里」 based on 2020 presidential data?
- Can we find the similarity of our own village?
## How to define 「章魚里」?
Basically, after a bit of literature search, you may find that the definition of 「章魚里」 is quite ambiguous. So we are using a much fancier metric: **cosine similarity**.
## What is cosine similarity
> Cosine similarity is a measure of similarity between two non-zero vectors of an inner product space. It is defined to equal the cosine of the angle between them, which is also the same as the inner product of the same vectors normalized to both have length 1.
\begin{equation}
a = (a_1, a_2, a_3) \\
b = (b_1, b_2, b_3)
\end{equation}
\begin{align}
cos\theta &= \frac{\sum_i(a_i \times b_i)}{\sqrt{\sum_i a_i^2} \times \sqrt{\sum_i b_i^2}} \\
&= \frac{a \cdot b}{\parallel a \parallel \times \parallel b \parallel}
\end{align}
Source: <https://en.wikipedia.org/wiki/Cosine_similarity>
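In NumPy this boils down to a dot product divided by the two vector norms. A minimal sketch, using two made-up vote-share vectors (the real computation on the election data appears later in this notebook):
```
import numpy as np

def cosine_similarity(a, b):
    """Cosine of the angle between vectors a and b."""
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

# made-up vote shares for three candidates
national = np.array([0.04, 0.39, 0.57])
village = np.array([0.05, 0.35, 0.60])

print(cosine_similarity(national, village))
```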
## Can we find out 「章魚里」 based on 2020 presidential data?
Definitely, we all have access to the [Central Election Commission](https://db.cec.gov.tw/).
## We've downloaded these spreadsheets and prepared an in-browser environment for you.
[](https://mybinder.org/v2/gh/yaojenkuo/talks/HEAD)
## We can access to a Python notebook, R notebook, or RStudio in browser, no strings attached.

## Besides a few kernels to execute, we also attached some data.
- Reading a CSV file.
- Reading a Excel spreadsheet.
```
# reading data via Python's pandas library
csv_df = pd.read_csv('presidential_2020.csv')
excel_df = pd.read_excel('presidential-2020/總統-A05-4-候選人得票數一覽表-各投開票所(南投縣).xls', skiprows=[0, 1, 3, 4])
```
## Our CSV file is an integrated file after manipulations
```
csv_df.head()
```
## Our Excel spreadsheets are the original files downloaded from [Central Election Commission](https://db.cec.gov.tw/)
```
excel_df.head()
```
## We can also try importing via the RStudio interface.
```r
library(readxl)
csv_df = read.csv('presidential_2020.csv')
excel_df = read_excel('presidential-2020/總統-A05-4-候選人得票數一覽表-各投開票所(南投縣).xls')
head(csv_df)
head(excel_df)
```
## We write code to integrate these spreadsheets into a CSV file
```
from presidential import Presidential
presidential = Presidential('presidential-2020')
presidential_df = presidential.adjust_presidential_df()
presidential_df.to_csv('presidential_2020.csv', index=False)
presidential_df.head()
presidential_df.tail()
```
## Check if the summations are right with Python
```
ttl_votes = presidential_df['votes'].sum()
ttl_votes_by_candidates = presidential_df.groupby('number')['votes'].sum()
ttl_votes_by_candidates
```
## Check if the summations are right with R
```r
library(dplyr)
csv_df %>%
group_by(number) %>%
summarise(ttl_votes = sum(votes))
```
## National percentage is our target vector to be compared
```
national_percentage = ttl_votes_by_candidates / ttl_votes
national_percentage
```
## Total votes for each village
```
combined_key = presidential_df['county'].str.cat(presidential_df['town']).str.cat(presidential_df['village'])
presidential_df = presidential_df.assign(combined_key=combined_key)
ttl_votes_by_combined_key = presidential_df.groupby(['combined_key'])['votes'].sum()
ttl_votes_by_combined_key
```
## Votes percentage by each candidate and village
```
ttl_votes_by_combined_key_candidates = presidential_df.groupby(['combined_key', 'number'])['votes'].sum()
soong = ttl_votes_by_combined_key_candidates[:, '1'] / ttl_votes_by_combined_key
han = ttl_votes_by_combined_key_candidates[:, '2'] / ttl_votes_by_combined_key
tsai = ttl_votes_by_combined_key_candidates[:, '3'] / ttl_votes_by_combined_key
votes_obtained = pd.concat([soong, han, tsai], axis=1)
votes_obtained.columns = ['soong', 'han', 'tsai']
votes_obtained
```
## Calculate cosine similarity
```
a = national_percentage.values
a_norm = np.linalg.norm(a)
cos_similarities = []
for i in range(votes_obtained.shape[0]):
b = votes_obtained.iloc[i, :].values
b_norm = np.linalg.norm(b)
ab = np.dot(a, b)
cos_similarity = np.dot(a, b) / (a_norm*b_norm)
cos_similarities.append(cos_similarity)
votes_obtained = votes_obtained.assign(cosine_similarity=cos_similarities)
votes_obtained = votes_obtained.reset_index()
votes_obtained.head()
```
## Sort by cosine similarity with descending order to find 「章魚里」
```
votes_obtained.sort_values(['cosine_similarity', 'combined_key'], ascending=[False, True]).reset_index(drop=True).head(10)
```
## Can we find the similarity of our own village?
Definitely.
```
def find_my_village(my_village, df):
df = df.sort_values(['cosine_similarity', 'combined_key'], ascending=[False, True]).reset_index(drop=True)
my_village_df = df[df['combined_key'] == my_village]
return my_village_df
my_village = '高雄市鼓山區桃源里'
my_village_df = find_my_village(my_village, votes_obtained)
my_village_similarity = my_village_df['cosine_similarity'].values[0]
my_village_rank = my_village_df.index[0]
n_rows = votes_obtained.shape[0]
print("{}的餘弦相似度為{:.4f}, 排名{}/{}".format(my_village, my_village_similarity, my_village_rank, n_rows))
my_village_df
```
## Feeling motivated?
## Start with the most practical one: Python
- Procedural programming with Python
- Object-oriented programming with Python
- Using Python libraries
## Start with the most practical one: R
- Procedural programming with R
- Functional programming with R
- Using R libraries
## Resources I've used when learning Python
- [Introducing Python](https://www.amazon.com/Introducing-Python-Modern-Computing-Packages/dp/1449359361)
- [A Whirlwind Tour of Python](https://jakevdp.github.io/WhirlwindTourOfPython/index.html)
- [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/)
## Resources I've used when learning R
- [The Art of R Programming](https://www.amazon.com/Art-Programming-Statistical-Software-Design/dp/1593273843)
- [Advanced R](https://adv-r.hadley.nz/)
- [R for Data Science](https://r4ds.had.co.nz/)
- [Data Science Specialization](https://www.coursera.org/specializations/jhu-data-science)
- [Statistics with R Specialization](https://www.coursera.org/specializations/statistics)
## Learning resources from me
- [數據交點](https://www.datainpoint.com)
- [Substack](https://datainpoint.substack.com/about)
## Phew, that is a lot to catch up on...
You do not have to finish every course or book from end to end.

Source: <https://giphy.com/>
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def get_value_ratios(df, col_name):
return df[col_name].value_counts() / df[col_name].value_counts().sum()
def get_checkbox_ratios(df, col_pattern):
channel_names = []
channel_checks = []
for col_name in df.columns:
if (col_pattern in col_name) and ('OTHER_TEXT' not in col_name):
channel_name = df[col_name].value_counts().index
channel_check = df[col_name].value_counts().values
if channel_name.size != 0:
channel_names.append(channel_name[0])
channel_checks.append(channel_check[0])
channel_counts = pd.Series(channel_checks, index=channel_names)
channel_ratios = channel_counts / channel_counts.sum()
channel_ratios_sorted = channel_ratios.sort_values(ascending=False)
return channel_ratios_sorted
def plot_ans_38(ans_38_ser):
fig = plt.figure()
ax = plt.axes()
ans_38_ser = ans_38_ser[::-1]
ax.barh(ans_38_ser.index, ans_38_ser.values, color=['c', 'c', 'c', 'c', 'c', 'r'])
ax.set_title("What is the primary tool that you use at work or school to analyze data?")
ax.set_xticks([0, 0.1, 0.2, 0.3, 0.4, 0.5])
ax.set_xticklabels(['0%', '10%', '20%', '30%', '40%', '50%'])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
plt.show()
def get_ax(ans_ser, plot_title, ax):
ax = ax
ans_ser = ans_ser[:10][::-1]
ax.barh(ans_ser.index, ans_ser.values, color=['c', 'c', 'c', 'c', 'c', 'c', 'c', 'r', 'r', 'r'])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.set_title(plot_title)
def plot_ans(ans_ser, question_str):
fig, axes = plt.subplots()
get_ax(ans_ser, question_str, axes)
plt.show()
survey = pd.read_csv("https://kaggle-ml-ds-survey.s3-ap-northeast-1.amazonaws.com/kaggle-survey-2020/kaggle_survey_2020_responses.csv", skiprows=[1])
ans_7 = get_checkbox_ratios(survey, 'Q7')
ans_8 = get_value_ratios(survey, 'Q8')
ans_38 = get_value_ratios(survey, 'Q38')
plot_ans_38(ans_38)
plot_ans(ans_7, "What programming languages do you use on a regular basis?")
plot_ans(ans_8, "What programming language would you recommend \n an aspiring data scientist to learn first?")
# reading data via Python's pandas library
csv_df = pd.read_csv('presidential_2020.csv')
excel_df = pd.read_excel('presidential-2020/總統-A05-4-候選人得票數一覽表-各投開票所(南投縣).xls', skiprows=[0, 1, 3, 4])
csv_df.head()
excel_df.head()
library(readxl)
csv_df = read.csv('presidential_2020.csv')
excel_df = read_excel('presidential-2020/總統-A05-4-候選人得票數一覽表-各投開票所(南投縣).xls')
head(csv_df)
head(excel_df)
from presidential import Presidential
presidential = Presidential('presidential-2020')
presidential_df = presidential.adjust_presidential_df()
presidential_df.to_csv('presidential_2020.csv', index=False)
presidential_df.head()
presidential_df.tail()
ttl_votes = presidential_df['votes'].sum()
ttl_votes_by_candidates = presidential_df.groupby('number')['votes'].sum()
ttl_votes_by_candidates
library(dplyr)
csv_df %>%
group_by(number) %>%
summarise(ttl_votes = sum(votes))
national_percentage = ttl_votes_by_candidates / ttl_votes
national_percentage
combined_key = presidential_df['county'].str.cat(presidential_df['town']).str.cat(presidential_df['village'])
presidential_df = presidential_df.assign(combined_key=combined_key)
ttl_votes_by_combined_key = presidential_df.groupby(['combined_key'])['votes'].sum()
ttl_votes_by_combined_key
ttl_votes_by_combined_key_candidates = presidential_df.groupby(['combined_key', 'number'])['votes'].sum()
soong = ttl_votes_by_combined_key_candidates[:, '1'] / ttl_votes_by_combined_key
han = ttl_votes_by_combined_key_candidates[:, '2'] / ttl_votes_by_combined_key
tsai = ttl_votes_by_combined_key_candidates[:, '3'] / ttl_votes_by_combined_key
votes_obtained = pd.concat([soong, han, tsai], axis=1)
votes_obtained.columns = ['soong', 'han', 'tsai']
votes_obtained
a = national_percentage.values
a_norm = np.linalg.norm(a)
cos_similarities = []
for i in range(votes_obtained.shape[0]):
b = votes_obtained.iloc[i, :].values
b_norm = np.linalg.norm(b)
ab = np.dot(a, b)
cos_similarity = np.dot(a, b) / (a_norm*b_norm)
cos_similarities.append(cos_similarity)
votes_obtained = votes_obtained.assign(cosine_similarity=cos_similarities)
votes_obtained = votes_obtained.reset_index()
votes_obtained.head()
votes_obtained.sort_values(['cosine_similarity', 'combined_key'], ascending=[False, True]).reset_index(drop=True).head(10)
def find_my_village(my_village, df):
df = df.sort_values(['cosine_similarity', 'combined_key'], ascending=[False, True]).reset_index(drop=True)
my_village_df = df[df['combined_key'] == my_village]
return my_village_df
my_village = '高雄市鼓山區桃源里'
my_village_df = find_my_village(my_village, votes_obtained)
my_village_similarity = my_village_df['cosine_similarity'].values[0]
my_village_rank = my_village_df.index[0]
n_rows = votes_obtained.shape[0]
print("{}的餘弦相似度為{:.4f}, 排名{}/{}".format(my_village, my_village_similarity, my_village_rank, n_rows))
my_village_df
| 0.439988 | 0.936749 |
# Optimal clustering by Zemel et al. - Adult data (race)
This notebook contains an implementation of the pre-processing fairness intervention introduced in Learning Fair Representations by Zemel et al. (2013) as part of the IBM AIF360 fairness tool box github.com/IBM/AIF360.
Here, we consider fairness defined with respect to race. There is another notebook considering fairness with respect to sex using Zemel et al.'s intervention method, which contains more details on the method. We follow analogous steps to the accompanying notebook addressing unfairness with respect to sex.
```
from pathlib import Path
import joblib
import numpy as np
import pandas as pd
from aif360.algorithms.preprocessing.lfr import LFR # noqa
from aif360.datasets import StandardDataset
from fairlearn.metrics import (
demographic_parity_difference,
demographic_parity_ratio,
)
from helpers.metrics import accuracy
from helpers.plot import group_bar_plots
```
## Load data
We have committed preprocessed data to the repository for reproducibility and we load it here. Check out the preprocessing notebook for details on how this data was obtained.
```
artifacts_dir = Path("../../../artifacts")
data_dir = artifacts_dir / "data" / "adult"
train = pd.read_csv(data_dir / "processed" / "train-one-hot.csv")
val = pd.read_csv(data_dir / "processed" / "val-one-hot.csv")
test = pd.read_csv(data_dir / "processed" / "test-one-hot.csv")
```
### Remove points which are not white/black people
In order to analyse unfairness for a binary protected attribute, namely black / white race, we remove data points that correspond to races other than these two.
```
# The filtering has to be assigned back to the original names: rebinding the
# loop variable `data` (or calling `.drop` without assignment) would leave
# train/test/val unchanged.
train, test, val = [
    df[df.race_white + df.race_black == 1] for df in (train, test, val)
]
# Note: the unused race indicator columns (race_amer_indian_eskimo,
# race_asian_pac_islander, race_other, race_black) are deliberately left in
# place so the feature set still matches the pre-trained baseline model
# loaded below.
```
In order to process data for our fairness intervention we need to define special dataset objects which are part of every intervention pipeline within the IBM AIF360 toolbox. These objects contain the original data as well as some useful further information, e.g., which feature is the protected attribute as well as which column corresponds to the label.
```
train_sds = StandardDataset(
train,
label_name="salary",
favorable_classes=[1],
protected_attribute_names=["race_white"],
privileged_classes=[[1]],
)
test_sds = StandardDataset(
test,
label_name="salary",
favorable_classes=[1],
protected_attribute_names=["race_white"],
privileged_classes=[[1]],
)
val_sds = StandardDataset(
val,
label_name="salary",
favorable_classes=[1],
protected_attribute_names=["race_white"],
privileged_classes=[[1]],
)
index = train_sds.feature_names.index("race_white")
privileged_groups = [{"race_white": 1.0}]
unprivileged_groups = [{"race_white": 0.0}]
```
## Demographic parity
Given the original unfair data set we apply Zemel et al.'s intervention to obtain a fair data set including fair labels. More precisely, we load an already learnt mitigation or learn a new mitigation procedure based on the true and predicted labels of the training data. We then apply the learnt procedure to transform the testing data and analyse fairness and accuracy in the transformed testing data.
The degree of fairness and accuracy can be controlled by the choice of the parameters $A_x, A_y, A_z$ and $k$ when setting up the mitigation procedure. Here, $A_x$ controls the loss associated with the distance between the original and transformed data set, $A_y$ the accuracy loss, and $A_z$ the fairness loss. The larger one of these parameters is chosen relative to the others, the higher the priority given to minimising the loss associated with that parameter. Hence, leaving $A_x$ and $A_y$ fixed, we can increase the degree of fairness achieved by increasing the parameter $A_z$.
As differences in fairness between independently learnt mitigations with the same parameter choice can sometimes be significant, we load a pre-trained intervention which achieves reasonable results. The user is still encouraged to train interventions themselves (see the commented-out code below) and compare the achieved fairness, potentially over a number of independent runs; a sketch of such a sweep follows.
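A commented-out sketch of how one might compare a few settings of $A_z$ while keeping $A_x$ and $A_y$ fixed. The values below are arbitrary choices, each fit can take a while, and results will vary between runs.
```
# for az in [1.0, 10.0, 100.0]:
#     tr = LFR(
#         unprivileged_groups=unprivileged_groups,
#         privileged_groups=privileged_groups,
#         k=5,
#         Ax=0.01,
#         Ay=1.0,
#         Az=az,
#     ).fit(train_sds)
#     labels = tr.transform(test_sds).labels.flatten()
#     dpd_az = demographic_parity_difference(
#         test.salary, labels, sensitive_features=test.race_white,
#     )
#     print(f"Az={az}: accuracy={accuracy(test.salary, labels):.3f}, "
#           f"demographic parity difference={dpd_az:.3f}")
```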
## Train unfair model
For maximum reproducibility we load the baseline model from disk, but the code used to train it can be found in the baseline model notebook.
```
bl_model = joblib.load(artifacts_dir / "models" / "finance" / "baseline.pkl")
bl_test_probs = bl_model.predict_proba(test_sds.features)[:, 1]
bl_test_pred = bl_test_probs > 0.5
```
## Load or learn intervention
So that you can reproduce our results we include a pretrained model, but the code for training your own model and experimenting with hyperparameters can be found below.
a) Load the intervention previously learned on the training data.
```
TR = joblib.load(artifacts_dir / "models" / "finance" / "zemel-race.pkl")
```
b) Learn the intervention on the training data.
```
# TR = LFR(
# unprivileged_groups=unprivileged_groups,
# privileged_groups=privileged_groups,
# k=5,
# Ax=0.01,
# Ay=1.0,
# Az=100.0,
# )
# TR = TR.fit(train_sds)
```
Apply intervention to test set.
```
transf_test_sds = TR.transform(test_sds)
test_fair_labels = transf_test_sds.labels.flatten()
```
Evaluate fairness and accuracy on test data.
```
bl_acc = bl_model.score(test.drop(columns="salary"), test.salary)
bl_dpd = demographic_parity_difference(
test.salary, bl_test_pred, sensitive_features=test.race_white,
)
bl_dpr = demographic_parity_ratio(
test.salary, bl_test_pred, sensitive_features=test.race_white,
)
acc = accuracy(test.salary, test_fair_labels)
dpd = demographic_parity_difference(
test.salary, test_fair_labels, sensitive_features=test.race_white,
)
dpr = demographic_parity_ratio(
test.salary, test_fair_labels, sensitive_features=test.race_white,
)
print(f"Baseline accuracy: {bl_acc:.3f}")
print(f"Accuracy: {acc:.3f}\n")
print(f"Baseline demographic parity difference: {bl_dpd:.3f}")
print(f"Demographic parity difference: {dpd:.3f}\n")
print(f"Baseline demographic parity ratio: {bl_dpr:.3f}")
print(f"Demographic parity ratio: {dpr:.3f}")
dp_bar = group_bar_plots(
np.concatenate([bl_test_pred, test_fair_labels]),
np.tile(test.race_white.map({0: "Black", 1: "White"}), 2),
groups=np.concatenate(
[np.zeros_like(bl_test_pred), np.ones_like(test_fair_labels)]
),
group_names=["Baseline", "Zemel"],
title="Proportion of predicted high earners by race",
xlabel="Propotion of predicted high earners",
ylabel="Method",
)
dp_bar
```
|
github_jupyter
|
from pathlib import Path
import joblib
import numpy as np
import pandas as pd
from aif360.algorithms.preprocessing.lfr import LFR # noqa
from aif360.datasets import StandardDataset
from fairlearn.metrics import (
demographic_parity_difference,
demographic_parity_ratio,
)
from helpers.metrics import accuracy
from helpers.plot import group_bar_plots
artifacts_dir = Path("../../../artifacts")
data_dir = artifacts_dir / "data" / "adult"
train = pd.read_csv(data_dir / "processed" / "train-one-hot.csv")
val = pd.read_csv(data_dir / "processed" / "val-one-hot.csv")
test = pd.read_csv(data_dir / "processed" / "test-one-hot.csv")
for data in [train, test, val]:
data = data[data.race_white + data.race_black == 1]
data.drop(
[
"race_amer_indian_eskimo",
"race_asian_pac_islander",
"race_other",
"race_black",
],
axis=1,
)
train_sds = StandardDataset(
train,
label_name="salary",
favorable_classes=[1],
protected_attribute_names=["race_white"],
privileged_classes=[[1]],
)
test_sds = StandardDataset(
test,
label_name="salary",
favorable_classes=[1],
protected_attribute_names=["race_white"],
privileged_classes=[[1]],
)
val_sds = StandardDataset(
val,
label_name="salary",
favorable_classes=[1],
protected_attribute_names=["race_white"],
privileged_classes=[[1]],
)
index = train_sds.feature_names.index("race_white")
privileged_groups = [{"race_white": 1.0}]
unprivileged_groups = [{"race_white": 0.0}]
bl_model = joblib.load(artifacts_dir / "models" / "finance" / "baseline.pkl")
bl_test_probs = bl_model.predict_proba(test_sds.features)[:, 1]
bl_test_pred = bl_test_probs > 0.5
TR = joblib.load(artifacts_dir / "models" / "finance" / "zemel-race.pkl")
# TR = LFR(
# unprivileged_groups=unprivileged_groups,
# privileged_groups=privileged_groups,
# k=5,
# Ax=0.01,
# Ay=1.0,
# Az=100.0,
# )
# TR = TR.fit(train_sds)
transf_test_sds = TR.transform(test_sds)
test_fair_labels = transf_test_sds.labels.flatten()
bl_acc = bl_model.score(test.drop(columns="salary"), test.salary)
bl_dpd = demographic_parity_difference(
test.salary, bl_test_pred, sensitive_features=test.race_white,
)
bl_dpr = demographic_parity_ratio(
test.salary, bl_test_pred, sensitive_features=test.race_white,
)
acc = accuracy(test.salary, test_fair_labels)
dpd = demographic_parity_difference(
test.salary, test_fair_labels, sensitive_features=test.race_white,
)
dpr = demographic_parity_ratio(
test.salary, test_fair_labels, sensitive_features=test.race_white,
)
print(f"Baseline accuracy: {bl_acc:.3f}")
print(f"Accuracy: {acc:.3f}\n")
print(f"Baseline demographic parity difference: {bl_dpd:.3f}")
print(f"Demographic parity difference: {dpd:.3f}\n")
print(f"Baseline demographic parity ratio: {bl_dpr:.3f}")
print(f"Demographic parity ratio: {dpr:.3f}")
dp_bar = group_bar_plots(
np.concatenate([bl_test_pred, test_fair_labels]),
np.tile(test.race_white.map({0: "Black", 1: "White"}), 2),
groups=np.concatenate(
[np.zeros_like(bl_test_pred), np.ones_like(test_fair_labels)]
),
group_names=["Baseline", "Zemel"],
title="Proportion of predicted high earners by race",
xlabel="Propotion of predicted high earners",
ylabel="Method",
)
dp_bar
| 0.643105 | 0.969613 |
# Unit 3 - Lesson 2 - Challenge - If a tree falls in the forest
---------------------
# Human Resources Analytics
## Source of data:
- [https://www.kaggle.com/jaishofficial/human-resources-analytics](https://www.kaggle.com/jaishofficial/human-resources-analytics)
-----------------------
# Contents
- [Import Modules and Load Data](#Import-Modules-and-Load-Data)
- [What Does the Data Look Like?](#What-Does-the-Data-Look-Like?)
- [Clean the Data](#Clean-the-Data)
- [Exploratory Data Analysis](#Exploratory-Data-Analysis)
- [Variables of Interest](#Variables-of-Interest)
- [Additional Features](#Additional-Features)
- [Classifiers: Decision Tree vs. Random Forest](#Classifiers:--Decision-Tree-vs.-Random-Forest)
-------------------
# Import Modules and Load Data
## Import modules and enable the display of plots in this notebook
```
from scipy.stats import ttest_ind
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib inline
```
## Ignore harmless seaborn warnings
```
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
```
## Load the dataset into a DataFrame
```
file = 'https://raw.githubusercontent.com/djrgit/coursework/master/thinkful/data_science/my_progress/unit_3_deeper_into_supervised_learning/HR_comma_sep.csv'
hr = pd.read_csv(file)
```
-------------------
# What Does the Data Look Like?
## What is the shape of the dataset?
```
hr.shape
```
## What information can be quickly learned about the dataset?
```
hr.info()
```
## How is the dataset structured?
```
hr.head()
```
--------------------
# Clean the Data
```
hr = hr.rename(index=str, columns={'average_montly_hours': 'avg_monthly_hours', 'sales': 'dept'})
hr.head()
# Convert categorical entries into numerical entries (low, medium, high --> 1, 2, 3)
def numerize(level):
if level == 'low':
level = 1
elif level == 'medium':
level = 2
elif level == 'high':
level = 3
return level
hr['salary'] = hr['salary'].apply(numerize)
hr.head()
```
------------------
# Exploratory Data Analysis
## What are some statistics for columns with numerical data?
```
hr.describe()
```
## How does the data vary in columns with numerical data?
```
hr.var().nlargest(10)
```
## What sorts of possible correlations in the data become more apparent in a pairplot?
```
# Declare that you want to make a scatterplot matrix.
g = sns.PairGrid(hr.dropna(), diag_sharey=False)
# Scatterplot.
g.map_upper(plt.scatter, alpha=.5)
# Fit line summarizing the linear relationship of the two variables.
g.map_lower(sns.regplot, scatter_kws=dict(alpha=0))
# Give information about the univariate distributions of the variables.
g.map_diag(sns.kdeplot, lw=3)
plt.show()
```
## What does a correlation matrix look like?
```
# Make the correlation matrix.
corrmat = hr.corr()
print(corrmat)
# Set up the matplotlib figure.
f, ax = plt.subplots(figsize=(12, 9))
# Draw the heatmap using seaborn.
sns.heatmap(corrmat, vmax=.8, square=True)
plt.show()
```
# Variables of Interest
## Select an outcome variable and then pick four or five other variables (one to two categorical, three to four continuous) to act as the basis for features. Explore the variables using the univariate and bivariate methods you've learned so far.
###### Outcome variable:
- 'left'
###### Categorical / Continuous variables:
- 'salary'
- 'dept'
- 'satisfaction_level'
- 'last_evaluation'
- 'number_project'
- 'avg_monthly_hours'
- 'time_spend_company'
## How many employees in each department fall into each salary tier?
```
fig, ax = plt.subplots(figsize=(12,6))
sns.countplot(x='dept', hue='salary', data=hr)
plt.show()
```
## How does attrition vary amongst different departments?
```
fig, ax = plt.subplots(figsize=(12,6))
sns.countplot(x='dept', hue='left', data=hr)
plt.show()
```
## How does satisfaction level vary among employees that left?
```
fig, ax = plt.subplots(figsize=(12,6))
sns.violinplot(x='left', y='satisfaction_level', data=hr)
plt.show()
```
## How do employees' last evaluations vary among employees that left?
```
fig, ax = plt.subplots(figsize=(12,6))
sns.violinplot(x='left', y='last_evaluation', data=hr)
plt.show()
```
## How does the number of projects vary among the employees that left?
```
fig, ax = plt.subplots(figsize=(12,6))
sns.violinplot(x='left', y='number_project', data=hr)
plt.show()
```
## How do employees' average monthly hours vary among employees that left?
```
fig, ax = plt.subplots(figsize=(12,6))
sns.violinplot(x='left', y='avg_monthly_hours', data=hr)
plt.show()
```
## How does attrition vary across different salary tiers?
```
fig, ax = plt.subplots(figsize=(12,6))
sns.violinplot(x='left', y='salary', data=hr)
plt.show()
```
## How does time spent with the company vary among employees that left?
```
fig, ax = plt.subplots(figsize=(12,6))
sns.violinplot(x='left', y='time_spend_company', data=hr)
plt.show()
```
# Additional Features
###### Adding 10 Features (possible features to help predict attrition)
1. last_evaluation, number_project, avg_monthly_hours - somewhat correlated variables combined into one standardized feature
2. the above combined feature at/above its average (matching the `eval_proj_hours_gte_avg` flag computed below) - may help predict attrition
3. satisfaction_level below 0.5 - values below 0.5 may help predict attrition
4. last_evaluation at/above average - values at/above average may help predict attrition
5. number_project <= 3 - values <= 3 may help predict attrition
6. avg_monthly_hours at/above average - values at/above average may help predict attrition
7. time_spend_company at/above average - values at/above average may help predict attrition
8. salary below average - values below average may help predict attrition
9. dept != 'management' - employees not in management may be more likely to leave
10. dept = 'sales', 'technical', or 'support' - there may/may not be a difference in attrition rates for employees in these departments
```
means = hr[['last_evaluation','number_project','avg_monthly_hours']].mean(axis=0)
stds = hr[['last_evaluation','number_project','avg_monthly_hours']].std(axis=0)
hr['eval_proj_hours'] = ((hr[['last_evaluation','number_project','avg_monthly_hours']] - means) / stds).mean(axis=1)
hr['eval_proj_hours_gte_avg'] = np.where(hr['eval_proj_hours'] >= hr['eval_proj_hours'].mean(), 1, 0)
hr['sat_lvl_gte_avg'] = np.where(hr['satisfaction_level'] < 0.5, 1, 0)  # note: despite the name, 1 flags satisfaction below 0.5 (feature 3)
hr['last_eval_gte_avg'] = np.where(hr['last_evaluation'] >= hr['last_evaluation'].mean(), 1, 0)
hr['num_proj_gte_avg'] = np.where(hr['number_project'] <= 3, 1, 0)
hr['monthly_hrs_gte_avg'] = np.where(hr['avg_monthly_hours'] >= hr['avg_monthly_hours'].mean(), 1, 0)
hr['time_w_co_gte_avg'] = np.where(hr['time_spend_company'] >= hr['time_spend_company'].mean(), 1, 0)
hr['sal_lt_avg'] = np.where(hr['salary'] < hr['salary'].mean(), 1, 0)
hr['dept_is_mgmt'] = np.where(hr['dept'] != 'management', 1, 0)  # note: despite the name, 1 flags employees NOT in management (feature 9)
hr['dept_is_sales_tech_supp'] = np.where(hr['dept'].isin(['sales', 'technical', 'support']), 1, 0)
hr.head()
```
## What does a histogram of the combined 'eval_proj_hours' feature look like?
```
fig = plt.figure()
plt.hist(hr['eval_proj_hours'], bins=20)
```
## What does a correlation matrix look like with the 'eval_proj_hours' feature included?
```
plotdf = hr.loc[:, ['last_evaluation', 'number_project', 'avg_monthly_hours']]
plotdf['eval_proj_hours'] = hr['eval_proj_hours']
corrmat2 = plotdf.corr()
print(corrmat2)
```
# Classifiers: Decision Tree vs. Random Forest
## Comparing a Decision Tree Classifier with a Random Forest Classifier
```
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
import time
X = hr.drop('left', axis=1)
y = hr['left']
X = pd.get_dummies(X)
X.head()
```
### Decision Tree Classifier
```
dt_start_time = time.time()
dtree = DecisionTreeClassifier(max_depth=3, random_state=101)
dtree_scores = cross_val_score(dtree, X, y, cv=10)
dt_end_time = time.time()
print("Time elapsed: --- %s seconds ---" % (dt_end_time - dt_start_time))
print(dtree_scores)
dtree_avg = np.mean(dtree_scores)
print(dtree_avg)
```
### Random Forest Classifier
```
rfc_start_time = time.time()
rfc = RandomForestClassifier()
rfc_scores = cross_val_score(rfc, X, y, cv=10)
rfc_end_time = time.time()
print("Time elapsed: --- %s seconds ---" % (rfc_end_time - rfc_start_time))
print(rfc_scores)
avg = np.mean(rfc_scores)
print(avg)
```
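Since cross-validation scores vary from fold to fold, a quick significance check (rough, since CV folds are not fully independent) helps judge whether the random forest's higher mean accuracy is meaningful. A minimal sketch using the `dtree_scores` and `rfc_scores` arrays from above:
```
# Two-sample t-test on the two classifiers' CV accuracy scores.
from scipy.stats import ttest_ind

t_stat, p_value = ttest_ind(dtree_scores, rfc_scores)
print("t = {:.3f}, p = {:.4f}".format(t_stat, p_value))
# A small p-value suggests the accuracy gap is unlikely to be fold-to-fold noise alone.
```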
# Chemical-Disease Relation (CDR) Tutorial
In this example, we'll be writing an application to extract *mentions of* **chemical-induced-disease relationships** from Pubmed abstracts, as per the [BioCreative CDR Challenge](http://www.biocreative.org/resources/corpora/biocreative-v-cdr-corpus/). This tutorial will show off some of the more advanced features of Snorkel, so we'll assume you've followed the Intro tutorial.
### Task Description
The CDR task is comprised of three sets of 500 documents each, called training, development, and test. A document consists of the title and abstract of an article from [PubMed](https://www.ncbi.nlm.nih.gov/pubmed/), an archive of biomedical and life sciences journal literature. The documents have been hand-annotated with
* Mentions of chemicals and diseases along with their [MESH](https://meshb.nlm.nih.gov/#/fieldSearch) IDs, canonical IDs for medical entities. For example, mentions of "warfarin" in two different documents will have the same ID.
* Chemical-disease relations at the document-level. That is, if some piece of text in the document implies that a chemical with MESH ID `X` induces a disease with MESH ID `Y`, the document will be annotated with `Relation(X, Y)`.
The goal is to extract the document-level relations on the test set (without accessing the entity or relation annotations). For this tutorial, we make the following assumptions and alterations to the task:
* We discard all of the entity mention annotations and assume we have access to a state-of-the-art entity tagger (see Part I) to identify chemical and disease mentions, and link them to their canonical IDs.
* We shuffle the training and development sets a bit, producing a new training set with 900 documents and a new development set with 100 documents. We discard the training set relation annotations, but keep the development set to evaluate our labeling functions and extraction model.
* We evaluate the task at the mention-level, rather than the document-level. We will convert the document-level relation annotations to mention-level by simply saying that a mention pair `(X, Y)` in document `D` is a true relation mention if `Relation(X, Y)` was hand-annotated at the document-level for `D` (a small sketch of this conversion appears after the next paragraph).
In effect, the only inputs to this application are the plain text of the documents, a pre-trained entity tagger, and a small development set of annotated documents. This is representative of many information extraction tasks, and Snorkel is the perfect tool to bootstrap the extraction process with weak supervision. Let's get going.
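To make that mention-level conversion concrete, here is a minimal sketch (the names below are illustrative, not part of the Snorkel API): a candidate mention pair is labeled positive exactly when its chemical-disease pair was annotated at the document level.
```
# gold_doc_relations: set of (doc_id, chemical_mesh_id, disease_mesh_id) triples
# taken from the document-level annotations.
def mention_label(doc_id, chemical_id, disease_id, gold_doc_relations):
    return 1 if (doc_id, chemical_id, disease_id) in gold_doc_relations else 0
```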
## Part 0: Initial Prep
In your shell, download the raw data by running:
```bash
cd tutorials/cdr
./download_data.sh
```
Note that if you've previously run this tutorial (using SQLite), you can delete the old database by running (in the same directory as above):
```bash
rm snorkel.db
```
# Part I: Corpus Preprocessing
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
from snorkel import SnorkelSession
session = SnorkelSession()
```
### Configuring a `DocPreprocessor`
We'll start by defining a `DocPreprocessor` object to read in Pubmed abstracts from [Pubtator](http://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/PubTator/index.cgi). There is some extra annotation information in the file, which we'll skip for now. We'll use the `XMLMultiDocPreprocessor` class, which allows us to use [XPath queries](https://en.wikipedia.org/wiki/XPath) to specify the relevant sections of the XML format.
Note that we are newline-concatenating text from the title and abstract together for simplicity, but if we wanted to, we could easily extend the `DocPreprocessor` classes to preserve information about document structure.
```
import os
from snorkel.parser import XMLMultiDocPreprocessor
# The following line is for testing only. Feel free to ignore it.
file_path = 'data/CDR.BioC.small.xml' if 'CI' in os.environ else 'data/CDR.BioC.xml'
doc_preprocessor = XMLMultiDocPreprocessor(
path=file_path,
doc='.//document',
text='.//passage/text/text()',
id='.//id/text()'
)
```
### Creating a `CorpusParser`
Similar to the Intro tutorial, we'll now construct a `CorpusParser` using the preprocessor we just defined. However, this one has an extra ingredient: an entity tagger. [TaggerOne](https://www.ncbi.nlm.nih.gov/pubmed/27283952) is a popular entity tagger for PubMed, so we went ahead and preprocessed its tags on the CDR corpus for you. The function `TaggerOneTagger.tag` (in `utils.py`) tags sentences with mentions of chemicals and diseases. We'll use these tags to extract candidates in Part II. The tags are stored in `Sentence.entity_cids` and `Sentence.entity_types`, which are analogous to `Sentence.words`.
Recall that in the wild, we wouldn't have the manual labels included with the CDR data, and we'd have to use an automated tagger (like TaggerOne) to tag entity mentions. That's what we're doing here.
```
from snorkel.parser import CorpusParser
from utils import TaggerOneTagger
tagger_one = TaggerOneTagger()
corpus_parser = CorpusParser(fn=tagger_one.tag)
corpus_parser.apply(list(doc_preprocessor))
from snorkel.models import Document, Sentence
print("Documents:", session.query(Document).count())
print("Sentences:", session.query(Sentence).count())
```
# Part II: Candidate Extraction
With the TaggerOne entity tags, candidate extraction is pretty easy! We split into some preset training, development, and test sets. Then we'll use `PretaggedCandidateExtractor` to extract candidates using the TaggerOne entity tags.
```
from six.moves.cPickle import load
with open('data/doc_ids.pkl', 'rb') as f:
train_ids, dev_ids, test_ids = load(f)
train_ids, dev_ids, test_ids = set(train_ids), set(dev_ids), set(test_ids)
train_sents, dev_sents, test_sents = set(), set(), set()
docs = session.query(Document).order_by(Document.name).all()
for i, doc in enumerate(docs):
for s in doc.sentences:
if doc.name in train_ids:
train_sents.add(s)
elif doc.name in dev_ids:
dev_sents.add(s)
elif doc.name in test_ids:
test_sents.add(s)
else:
raise Exception('ID <{0}> not found in any id set'.format(doc.name))
from snorkel.models import Candidate, candidate_subclass
ChemicalDisease = candidate_subclass('ChemicalDisease', ['chemical', 'disease'])
from snorkel.candidates import PretaggedCandidateExtractor
candidate_extractor = PretaggedCandidateExtractor(ChemicalDisease, ['Chemical', 'Disease'])
```
We should get 8268 candidates in the training set, 888 candidates in the development set, and 4620 candidates in the test set.
```
for k, sents in enumerate([train_sents, dev_sents, test_sents]):
candidate_extractor.apply(sents, split=k)
print("Number of candidates:", session.query(ChemicalDisease).filter(ChemicalDisease.split == k).count())
```
### Candidate Recall
We will briefly discuss the issue of candidate recall. The end-recall of the extraction is effectively upper-bounded by our candidate set: any chemical-disease pair that is present in a document but not identified as a candidate cannot be extracted by our end extraction model. Below are some example reasons for missing a candidate<sup>1</sup>.
* The tagger is imperfect, and may miss a chemical or disease mention.
* The tagger is imperfect, and may attach an incorrect entity ID to a correctly identified chemical or disease mention. For example, "stomach pain" might get attached to the entity ID for "digestive track infection" rather than "stomach illness".
* A relation occurs across multiple sentences. For example, "**Artery calcification** is more prominent in older populations. It can be induced by **warfarin**."
If we just look at the set of extractions at the end of this tutorial, we won't be able to account for some false negatives that we missed at the candidate extraction stage. For simplicity, we ignore candidate recall in this tutorial and evaluate our extraction model just on the set of extractions made by the end model. However, when you're developing information extraction applications in the future, it's important to keep candidate recall in mind.
<sup>1</sup>Note that these specific issues can be combatted with advanced techniques like noun-phrase chunking to expand the entity mention set, or coreference parsing for cross-sentence candidates. We don't employ these here in order to focus on weak supervision.
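If you do want a rough sense of candidate recall on the development set, it reduces to a set intersection. A minimal sketch, assuming both the gold relations and the extracted candidates have been collapsed to sets of `(doc_id, chemical_id, disease_id)` triples (these variables are illustrative, not part of the Snorkel API):
```
def candidate_recall(gold_pairs, candidate_pairs):
    """Fraction of gold relation mentions that survived candidate extraction."""
    if not gold_pairs:
        return 1.0
    return len(gold_pairs & candidate_pairs) / len(gold_pairs)
```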
```
# default_exp models.MINIROCKET
```
# MINIROCKET
> A Very Fast (Almost) Deterministic Transform for Time Series Classification.
```
#export
from tsai.imports import *
from tsai.utils import *
from tsai.data.external import *
from tsai.models.layers import *
#export
from sktime.transformations.panel.rocket import MiniRocketMultivariate
from sklearn.linear_model import RidgeCV, RidgeClassifierCV
from sklearn.ensemble import VotingClassifier, VotingRegressor
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
#export
class MiniRocketClassifier(sklearn.pipeline.Pipeline):
"""Time series classification using MINIROCKET features and a linear classifier"""
def __init__(self, num_features=10_000, max_dilations_per_kernel=32, random_state=None,
alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, class_weight=None, **kwargs):
""" MiniRocketClassifier is recommended for up to 10k time series.
For a larger dataset, you can use MINIROCKET (in Pytorch).
scoring = None --> defaults to accuracy.
"""
        # sktime 0.9.0 renamed the `num_features` argument to `num_kernels`; fix contributed by
        # Siva Sai (SivaAndMe on GitHub): https://github.com/timeseriesAI/tsai/pull/306
self.steps = [('minirocketmultivariate', MiniRocketMultivariate(num_kernels=num_features,
max_dilations_per_kernel=max_dilations_per_kernel,
random_state=random_state)),
('ridgeclassifiercv', RidgeClassifierCV(alphas=alphas,
normalize=normalize_features,
scoring=scoring,
class_weight=class_weight,
**kwargs))]
store_attr()
self._validate_steps()
def __repr__(self):
return f'Pipeline(steps={self.steps.copy()})'
def save(self, fname=None, path='./models'):
fname = ifnone(fname, 'MiniRocketClassifier')
path = Path(path)
filename = path/fname
filename.parent.mkdir(parents=True, exist_ok=True)
with open(f'{filename}.pkl', 'wb') as output:
pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
#export
def load_minirocket(fname, path='./models'):
path = Path(path)
filename = path/fname
with open(f'{filename}.pkl', 'rb') as input:
output = pickle.load(input)
return output
#export
class MiniRocketRegressor(sklearn.pipeline.Pipeline):
"""Time series regression using MINIROCKET features and a linear regressor"""
def __init__(self, num_features=10000, max_dilations_per_kernel=32, random_state=None,
alphas=np.logspace(-3, 3, 7), *, normalize_features=True, memory=None, verbose=False, scoring=None, **kwargs):
""" MiniRocketRegressor is recommended for up to 10k time series.
For a larger dataset, you can use MINIROCKET (in Pytorch).
scoring = None --> defaults to r2.
"""
        # sktime 0.9.0 renamed the `num_features` argument to `num_kernels`; fix contributed by
        # Siva Sai (SivaAndMe on GitHub): https://github.com/timeseriesAI/tsai/pull/306
self.steps = [('minirocketmultivariate', MiniRocketMultivariate(num_kernels=num_features,
max_dilations_per_kernel=max_dilations_per_kernel,
random_state=random_state)),
('ridgecv', RidgeCV(alphas=alphas, normalize=normalize_features, scoring=scoring, **kwargs))]
store_attr()
self._validate_steps()
def __repr__(self):
return f'Pipeline(steps={self.steps.copy()})'
def save(self, fname=None, path='./models'):
fname = ifnone(fname, 'MiniRocketRegressor')
path = Path(path)
filename = path/fname
filename.parent.mkdir(parents=True, exist_ok=True)
with open(f'{filename}.pkl', 'wb') as output:
pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
#export
def load_minirocket(fname, path='./models'):
path = Path(path)
filename = path/fname
with open(f'{filename}.pkl', 'rb') as input:
output = pickle.load(input)
return output
#export
class MiniRocketVotingClassifier(VotingClassifier):
"""Time series classification ensemble using MINIROCKET features, a linear classifier and majority voting"""
def __init__(self, n_estimators=5, weights=None, n_jobs=-1, num_features=10_000, max_dilations_per_kernel=32, random_state=None,
alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, class_weight=None, **kwargs):
store_attr()
estimators = [(f'est_{i}', MiniRocketClassifier(num_features=num_features, max_dilations_per_kernel=max_dilations_per_kernel,
random_state=random_state, alphas=alphas, normalize_features=normalize_features, memory=memory,
verbose=verbose, scoring=scoring, class_weight=class_weight, **kwargs))
for i in range(n_estimators)]
super().__init__(estimators, voting='hard', weights=weights, n_jobs=n_jobs, verbose=verbose)
def __repr__(self):
return f'MiniRocketVotingClassifier(n_estimators={self.n_estimators}, \nsteps={self.estimators[0][1].steps})'
def save(self, fname=None, path='./models'):
fname = ifnone(fname, 'MiniRocketVotingClassifier')
path = Path(path)
filename = path/fname
filename.parent.mkdir(parents=True, exist_ok=True)
with open(f'{filename}.pkl', 'wb') as output:
pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
#export
def get_minirocket_preds(X, fname, path='./models', model=None):
if X.ndim == 1: X = X[np.newaxis][np.newaxis]
elif X.ndim == 2: X = X[np.newaxis]
if model is None:
model = load_minirocket(fname=fname, path=path)
return model.predict(X)
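# Example usage of get_minirocket_preds (illustrative only; assumes a pipeline was saved above
# with .save('MiniRocketClassifier'), and that new_sample is a 1D, 2D or 3D numpy array):
# preds = get_minirocket_preds(new_sample, fname='MiniRocketClassifier', path='./models')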
#export
class MiniRocketVotingRegressor(VotingRegressor):
"""Time series regression ensemble using MINIROCKET features, a linear regressor and a voting regressor"""
def __init__(self, n_estimators=5, weights=None, n_jobs=-1, num_features=10_000, max_dilations_per_kernel=32, random_state=None,
alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, **kwargs):
store_attr()
estimators = [(f'est_{i}', MiniRocketRegressor(num_features=num_features, max_dilations_per_kernel=max_dilations_per_kernel,
random_state=random_state, alphas=alphas, normalize_features=normalize_features, memory=memory,
verbose=verbose, scoring=scoring, **kwargs))
for i in range(n_estimators)]
super().__init__(estimators, weights=weights, n_jobs=n_jobs, verbose=verbose)
def __repr__(self):
return f'MiniRocketVotingRegressor(n_estimators={self.n_estimators}, \nsteps={self.estimators[0][1].steps})'
def save(self, fname=None, path='./models'):
fname = ifnone(fname, 'MiniRocketVotingRegressor')
path = Path(path)
filename = path/fname
filename.parent.mkdir(parents=True, exist_ok=True)
with open(f'{filename}.pkl', 'wb') as output:
pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
# Univariate classification with sklearn-type API
dsid = 'OliveOil'
fname = 'MiniRocketClassifier'
X_train, y_train, X_test, y_test = get_UCR_data(dsid)
cls = MiniRocketClassifier()
cls.fit(X_train, y_train)
cls.save(fname)
pred = cls.score(X_test, y_test)
del cls
cls = load_minirocket(fname)
test_eq(cls.score(X_test, y_test), pred)
# Multivariate classification with sklearn-type API
dsid = 'NATOPS'
X_train, y_train, X_test, y_test = get_UCR_data(dsid)
cls = MiniRocketClassifier()
cls.fit(X_train, y_train)
cls.score(X_test, y_test)
# Multivariate classification with sklearn-type API
dsid = 'NATOPS'
X_train, y_train, X_test, y_test = get_UCR_data(dsid)
cls = MiniRocketVotingClassifier(5)
cls.fit(X_train, y_train)
cls.score(X_test, y_test)
# Univariate regression with sklearn-type API
from sklearn.metrics import mean_squared_error, make_scorer
dsid = 'Covid3Month'
fname = 'MiniRocketRegressor'
X_train, y_train, X_test, y_test = get_Monash_regression_data(dsid)
if X_train is not None:
rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
reg = MiniRocketRegressor(scoring=rmse_scorer)
reg.fit(X_train, y_train)
reg.save(fname)
del reg
reg = load_minirocket(fname)
y_pred = reg.predict(X_test)
print(mean_squared_error(y_test, y_pred, squared=False))
# Multivariate regression with sklearn-type API
from sklearn.metrics import mean_squared_error, make_scorer
dsid = 'AppliancesEnergy'
X_train, y_train, X_test, y_test = get_Monash_regression_data(dsid)
if X_train is not None:
rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
reg = MiniRocketRegressor(scoring=rmse_scorer)
reg.fit(X_train, y_train)
reg.save(fname)
del reg
reg = load_minirocket(fname)
y_pred = reg.predict(X_test)
print(mean_squared_error(y_test, y_pred, squared=False))
# Multivariate regression ensemble with sklearn-type API
if X_train is not None:
reg = MiniRocketVotingRegressor(5, scoring=rmse_scorer)
reg.fit(X_train, y_train)
y_pred = reg.predict(X_test)
print(mean_squared_error(y_test, y_pred, squared=False))
#hide
from tsai.imports import create_scripts
from tsai.export import get_nb_name
nb_name = get_nb_name()
create_scripts(nb_name);
```
# RNNs
We will use Recurrent Neural Networks, and in particular LSTMs, to perform sentiment analysis in Keras. Conveniently, Keras has a built-in IMDb movie reviews dataset that we can use.
```
%tensorflow_version 2.x
from keras.datasets import imdb
import warnings
vocabulary_size = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words = vocabulary_size)
print('Loaded dataset with {} training samples, {} test samples'.format(len(X_train), len(X_test)))
```
Inspect a sample review and its label
```
print('---review---')
print(X_train[6])
print('---label---')
print(y_train[6])
```
Map word IDs back to words
```
word2id = imdb.get_word_index()
# imdb.load_data() offsets word indices by 3 (0 = padding, 1 = start, 2 = out-of-vocabulary),
# so shift the indices when mapping back to words
id2word = {i + 3: word for word, i in word2id.items()}
id2word.update({0: '<PAD>', 1: '<START>', 2: '<UNK>'})
print('---review with words---')
print([id2word.get(i, '<UNK>') for i in X_train[6]])
print('---label---')
print(y_train[6])
```
Maximum review length and minimum review length
```
review_lengths = [len(x) for x in list(X_train) + list(X_test)]
print('Maximum review length: {}'.format(max(review_lengths)))
print('Minimum review length: {}'.format(min(review_lengths)))
```
## Pad sequences
In order to feed this data into our RNN, all input documents must have the same length. We will limit the maximum review length to max_words by truncating longer reviews and padding shorter reviews with a null value (0). We can accomplish this using the pad_sequences() function in Keras. For now, set max_words to 500.
```
from keras.preprocessing import sequence
max_words = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_words)
X_test = sequence.pad_sequences(X_test, maxlen=max_words)
```
## TODO: Design an RNN model for sentiment analysis
Build our model architecture in the code cell below. We have imported some layers from Keras that you might need but feel free to use any other layers / transformations you like.
Remember that our input is a sequence of words (technically, integer word IDs) of maximum length = max_words, and our output is a binary sentiment label (0 or 1).
```
from keras import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout
embedding_size=32
model=Sequential()
model.add(Embedding(vocabulary_size, embedding_size, input_length=max_words))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())
```
To summarize, our model is a simple RNN with one embedding layer, one LSTM layer, and one dense layer. 213,301 parameters in total need to be trained.
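That parameter total can be checked by hand; here is a short sketch of the arithmetic, using the `vocabulary_size` and `embedding_size` defined above and the 100 LSTM units:
```
embedding_params = vocabulary_size * embedding_size        # 5000 * 32 = 160,000
lstm_params = 4 * (100 * (100 + embedding_size) + 100)     # 4 gates: 4 * (100 * 132 + 100) = 53,200
dense_params = 100 + 1                                     # weights + bias = 101
print(embedding_params + lstm_params + dense_params)       # 213,301
```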
## Train and evaluate our model
We first need to compile our model by specifying the loss function and optimizer we want to use while training, as well as any evaluation metrics we'd like to measure. Specify the appropriate parameters, including at least one metric: 'accuracy'.
```
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
```
Once compiled, we can kick off the training process. There are two important training parameters that we have to specify - batch size and number of training epochs, which together with our model architecture determine the total training time.
Training may take a while, so grab a cup of coffee, or better, go for a run!
```
batch_size = 64
num_epochs = 3
X_valid, y_valid = X_train[:batch_size], y_train[:batch_size]
X_train2, y_train2 = X_train[batch_size:], y_train[batch_size:]
model.fit(X_train2, y_train2, validation_data=(X_valid, y_valid), batch_size=batch_size, epochs=num_epochs)
```
`scores[1]` will correspond to accuracy if we pass `metrics=['accuracy']`
```
scores = model.evaluate(X_test, y_test, verbose=0)
print('Test accuracy:', scores[1])
```
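As a final sanity check, here is a minimal sketch for scoring a raw review string with the trained model. The encoding mirrors what `imdb.load_data` does by default (start token 1, out-of-vocabulary token 2, word indices offset by 3); the helper name is ours, not part of Keras:
```
def predict_sentiment(text):
    ids = [1]  # start token used by imdb.load_data
    for w in text.lower().split():
        idx = word2id.get(w)
        # unknown words, or words outside the top `vocabulary_size`, map to the OOV token (2)
        ids.append(idx + 3 if idx is not None and idx + 3 < vocabulary_size else 2)
    padded = sequence.pad_sequences([ids], maxlen=max_words)
    return float(model.predict(padded)[0][0])  # probability that the review is positive

print(predict_sentiment("this movie was absolutely wonderful"))
```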
# Brain tumor 3D segmentation with MONAI
This tutorial shows how to construct a training workflow for a multi-label segmentation task.
It contains the following features:
1. Transforms for dictionary format data.
1. Define a new transform according to MONAI transform API.
1. Load Nifti image with metadata, load a list of images and stack them.
1. Randomly adjust intensity for data augmentation.
1. Cache IO and transforms to accelerate training and validation.
1. 3D UNet model, Dice loss function, Mean Dice metric for 3D segmentation task.
1. Deterministic training for reproducibility.
The dataset comes from http://medicaldecathlon.com/.
Target: Gliomas segmentation necrotic/active tumour and oedema
Modality: Multimodal multisite MRI data (FLAIR, T1w, T1gd, T2w)
Size: 750 4D volumes (484 Training + 266 Testing)
Source: BRATS 2016 and 2017 datasets.
Challenge: Complex and heterogeneously-located targets
Below figure shows image patches with the tumor sub-regions that are annotated in the different modalities (top left) and the final labels for the whole dataset (right).
(Figure taken from the [BraTS IEEE TMI paper](https://ieeexplore.ieee.org/document/6975210/))

The image patches show from left to right:
1. the whole tumor (yellow) visible in T2-FLAIR (Fig.A).
1. the tumor core (red) visible in T2 (Fig.B).
1. the enhancing tumor structures (light blue) visible in T1Gd, surrounding the cystic/necrotic components of the core (green) (Fig. C).
1. The segmentations are combined to generate the final labels of the tumor sub-regions (Fig.D): edema (yellow), non-enhancing solid core (red), necrotic/cystic core (green), enhancing core (blue).
[](https://colab.research.google.com/github/Project-MONAI/tutorials/blob/master/3d_segmentation/brats_segmentation_3d.ipynb)
## Setup imports
```
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import matplotlib.pyplot as plt
import numpy as np
from monai.apps import DecathlonDataset
from monai.config import print_config
from monai.data import DataLoader
from monai.losses import DiceLoss
from monai.metrics import DiceMetric
from monai.networks.nets import UNet
from monai.networks.layers import Norm, Act
from monai.transforms import (
Activations,
AsChannelFirstd,
AsDiscrete,
CenterSpatialCropd,
Compose,
LoadImaged,
MapTransform,
NormalizeIntensityd,
Orientationd,
RandFlipd,
RandScaleIntensityd,
RandShiftIntensityd,
RandSpatialCropd,
Spacingd,
ToTensord,
)
from monai.utils import set_determinism
from tqdm.notebook import trange
import torch
print_config()
```
## Setup data directory
You can specify a directory with the `MONAI_DATA_DIRECTORY` environment variable.
This allows you to save results and reuse downloads.
If not specified a temporary directory will be used.
```
directory = os.environ.get("MONAI_DATA_DIRECTORY")
root_dir = tempfile.mkdtemp() if directory is None else directory
print(root_dir)
```
## Set deterministic training for reproducibility
```
set_determinism(seed=0)
```
## Define a new transform to convert brain tumor labels
Here we convert the multi-class labels into a multi-label segmentation task in one-hot format.
```
class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform):
"""
Convert labels to multi channels based on brats classes:
label 1 is the peritumoral edema
label 2 is the GD-enhancing tumor
label 3 is the necrotic and non-enhancing tumor core
The possible classes are TC (Tumor core), WT (Whole tumor)
and ET (Enhancing tumor).
"""
def __call__(self, data):
d = dict(data)
for key in self.keys:
result = []
# merge label 2 and label 3 to construct TC
result.append(np.logical_or(d[key] == 2, d[key] == 3))
# merge labels 1, 2 and 3 to construct WT
result.append(
np.logical_or(
np.logical_or(d[key] == 2, d[key] == 3), d[key] == 1
)
)
# label 2 is ET
result.append(d[key] == 2)
d[key] = np.stack(result, axis=0).astype(np.float32)
return d
```
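As a quick sanity check (a toy example; the values below are made up purely for illustration), the transform turns a single-channel label volume into three binary channels:
```
# A 2x2x1 patch containing labels 0-3 becomes 3 stacked binary channels (TC, WT, ET).
toy = {"label": np.array([[[0], [1]], [[2], [3]]])}
out = ConvertToMultiChannelBasedOnBratsClassesd(keys="label")(toy)
print(out["label"].shape)  # (3, 2, 2, 1)
```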
## Setup transforms for training and validation
```
train_transform = Compose(
[
# load 4 Nifti images and stack them together
LoadImaged(keys=["image", "label"]),
AsChannelFirstd(keys="image"),
ConvertToMultiChannelBasedOnBratsClassesd(keys="label"),
Spacingd(
keys=["image", "label"],
pixdim=(1.5, 1.5, 2.0),
mode=("bilinear", "nearest"),
),
Orientationd(keys=["image", "label"], axcodes="RAS"),
RandSpatialCropd(
keys=["image", "label"], roi_size=[128, 128, 64], random_size=False
),
RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=0),
NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True),
RandScaleIntensityd(keys="image", factors=0.1, prob=0.5),
RandShiftIntensityd(keys="image", offsets=0.1, prob=0.5),
ToTensord(keys=["image", "label"]),
]
)
val_transform = Compose(
[
LoadImaged(keys=["image", "label"]),
AsChannelFirstd(keys="image"),
ConvertToMultiChannelBasedOnBratsClassesd(keys="label"),
Spacingd(
keys=["image", "label"],
pixdim=(1.5, 1.5, 2.0),
mode=("bilinear", "nearest"),
),
Orientationd(keys=["image", "label"], axcodes="RAS"),
CenterSpatialCropd(keys=["image", "label"], roi_size=[128, 128, 64]),
NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True),
ToTensord(keys=["image", "label"]),
]
)
```
## Quickly load data with DecathlonDataset
Here we use `DecathlonDataset` to automatically download and extract the dataset.
It inherits MONAI `CacheDataset`, so we set `cache_num=100` to cache 100 items for training and use the default args to cache all the items for validation.
```
train_ds = DecathlonDataset(
root_dir=root_dir,
task="Task01_BrainTumour",
transform=train_transform,
section="training",
download=True,
cache_num=100,
)
train_loader = DataLoader(train_ds, batch_size=2, shuffle=True)
val_ds = DecathlonDataset(
root_dir=root_dir,
task="Task01_BrainTumour",
transform=val_transform,
section="validation",
download=False,
)
val_loader = DataLoader(val_ds, batch_size=2, shuffle=False)
```
## Check data shape and visualize
```
# pick one image from DecathlonDataset to visualize and check the 4 channels
print(f"image shape: {val_ds[2]['image'].shape}")
plt.figure("image", (24, 6))
for i in range(4):
plt.subplot(1, 4, i + 1)
plt.title(f"image channel {i}")
plt.imshow(val_ds[2]["image"][i, :, :, 20].detach().cpu(), cmap="gray")
plt.show()
# also visualize the 3 channels label corresponding to this image
print(f"label shape: {val_ds[2]['label'].shape}")
plt.figure("label", (18, 6))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.title(f"label channel {i}")
plt.imshow(val_ds[2]["label"][i, :, :, 20].detach().cpu())
plt.show()
```
## Create Model, Loss, Optimizer
```
# utilities used in the validation step of training_loop below
from monai.inferers import sliding_window_inference
from monai.metrics import compute_meandice

ACTIVATIONS = [
Act.RELU,
Act.PRELU,
Act.LEAKYRELU,
Act.MISH,
Act.SWISH
]
def training_loop(model, optimizer, loss_function, name, max_epochs=180, val_interval=10):
best_metric = -1
best_metric_epoch = -1
epoch_loss_values = []
metric_values = []
post_pred = AsDiscrete(argmax=True, to_onehot=True, n_classes=2)
post_label = AsDiscrete(to_onehot=True, n_classes=2)
t = trange(max_epochs)
epoch_loss = 0
for epoch in t:
model.train()
t.set_description(f"epoch {epoch + 1}/{max_epochs} with loss {epoch_loss}", refresh=True)
epoch_loss = 0
step = 0
for batch_data in train_loader:
step += 1
inputs, labels = (
batch_data["image"].to(device),
batch_data["label"].to(device),
)
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_loss /= step
epoch_loss_values.append(epoch_loss)
if (epoch + 1) % val_interval == 0:
model.eval()
with torch.no_grad():
metric_sum = 0.0
metric_count = 0
for val_data in val_loader:
val_inputs, val_labels = (
val_data["image"].to(device),
val_data["label"].to(device),
)
roi_size = (160, 160, 160)
sw_batch_size = 4
val_outputs = sliding_window_inference(
val_inputs, roi_size, sw_batch_size, model)
val_outputs = post_pred(val_outputs)
val_labels = post_label(val_labels)
value = compute_meandice(
y_pred=val_outputs,
y=val_labels,
include_background=False,
)
metric_count += len(value)
metric_sum += value.sum().item()
metric = metric_sum / metric_count
metric_values.append(metric)
if metric > best_metric:
best_metric = metric
best_metric_epoch = epoch + 1
torch.save(model.state_dict(), os.path.join(
root_dir, f"model_{name}.pth"))
# standard PyTorch program style: create UNet, DiceLoss and Adam optimizer
for activation in ACTIVATIONS:
device = torch.device("cuda:0")
model = UNet(
dimensions=3,
in_channels=4,
out_channels=3,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
act=activation,
norm=Norm.BATCH,
).to(device)
loss_function = DiceLoss(to_onehot_y=False, sigmoid=True, squared_pred=True)
optimizer = torch.optim.Adam(
model.parameters(), 1e-4, weight_decay=1e-5, amsgrad=True
)
print(f"Training {activation}")
training_loop(model, optimizer, loss_function, name=str(activation))
```
## Execute a typical PyTorch training process
```
max_epochs = 180
val_interval = 2
best_metric = -1
best_metric_epoch = -1
epoch_loss_values = []
metric_values = []
metric_values_tc = []
metric_values_wt = []
metric_values_et = []
for epoch in range(max_epochs):
print("-" * 10)
print(f"epoch {epoch + 1}/{max_epochs}")
model.train()
epoch_loss = 0
step = 0
for batch_data in train_loader:
step += 1
inputs, labels = (
batch_data["image"].to(device),
batch_data["label"].to(device),
)
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
print(
f"{step}/{len(train_ds) // train_loader.batch_size}"
f", train_loss: {loss.item():.4f}"
)
epoch_loss /= step
epoch_loss_values.append(epoch_loss)
print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")
if (epoch + 1) % val_interval == 0:
model.eval()
with torch.no_grad():
dice_metric = DiceMetric(include_background=True, reduction="mean")
post_trans = Compose(
[Activations(sigmoid=True), AsDiscrete(threshold_values=True)]
)
metric_sum = metric_sum_tc = metric_sum_wt = metric_sum_et = 0.0
metric_count = (
metric_count_tc
) = metric_count_wt = metric_count_et = 0
for val_data in val_loader:
val_inputs, val_labels = (
val_data["image"].to(device),
val_data["label"].to(device),
)
val_outputs = model(val_inputs)
val_outputs = post_trans(val_outputs)
# compute overall mean dice
value, not_nans = dice_metric(y_pred=val_outputs, y=val_labels)
not_nans = not_nans.item()
metric_count += not_nans
metric_sum += value.item() * not_nans
# compute mean dice for TC
value_tc, not_nans = dice_metric(
y_pred=val_outputs[:, 0:1], y=val_labels[:, 0:1]
)
not_nans = not_nans.item()
metric_count_tc += not_nans
metric_sum_tc += value_tc.item() * not_nans
# compute mean dice for WT
value_wt, not_nans = dice_metric(
y_pred=val_outputs[:, 1:2], y=val_labels[:, 1:2]
)
not_nans = not_nans.item()
metric_count_wt += not_nans
metric_sum_wt += value_wt.item() * not_nans
# compute mean dice for ET
value_et, not_nans = dice_metric(
y_pred=val_outputs[:, 2:3], y=val_labels[:, 2:3]
)
not_nans = not_nans.item()
metric_count_et += not_nans
metric_sum_et += value_et.item() * not_nans
metric = metric_sum / metric_count
metric_values.append(metric)
metric_tc = metric_sum_tc / metric_count_tc
metric_values_tc.append(metric_tc)
metric_wt = metric_sum_wt / metric_count_wt
metric_values_wt.append(metric_wt)
metric_et = metric_sum_et / metric_count_et
metric_values_et.append(metric_et)
if metric > best_metric:
best_metric = metric
best_metric_epoch = epoch + 1
torch.save(
model.state_dict(),
os.path.join(root_dir, "best_metric_model.pth"),
)
print("saved new best metric model")
print(
f"current epoch: {epoch + 1} current mean dice: {metric:.4f}"
f" tc: {metric_tc:.4f} wt: {metric_wt:.4f} et: {metric_et:.4f}"
f"\nbest mean dice: {best_metric:.4f}"
f" at epoch: {best_metric_epoch}"
)
print(
f"train completed, best_metric: {best_metric:.4f}"
f" at epoch: {best_metric_epoch}"
)
```
## Plot the loss and metric
```
plt.figure("train", (12, 6))
plt.subplot(1, 2, 1)
plt.title("Epoch Average Loss")
x = [i + 1 for i in range(len(epoch_loss_values))]
y = epoch_loss_values
plt.xlabel("epoch")
plt.plot(x, y, color="red")
plt.subplot(1, 2, 2)
plt.title("Val Mean Dice")
x = [val_interval * (i + 1) for i in range(len(metric_values))]
y = metric_values
plt.xlabel("epoch")
plt.plot(x, y, color="green")
plt.show()
plt.figure("train", (18, 6))
plt.subplot(1, 3, 1)
plt.title("Val Mean Dice TC")
x = [val_interval * (i + 1) for i in range(len(metric_values_tc))]
y = metric_values_tc
plt.xlabel("epoch")
plt.plot(x, y, color="blue")
plt.subplot(1, 3, 2)
plt.title("Val Mean Dice WT")
x = [val_interval * (i + 1) for i in range(len(metric_values_wt))]
y = metric_values_wt
plt.xlabel("epoch")
plt.plot(x, y, color="brown")
plt.subplot(1, 3, 3)
plt.title("Val Mean Dice ET")
x = [val_interval * (i + 1) for i in range(len(metric_values_et))]
y = metric_values_et
plt.xlabel("epoch")
plt.plot(x, y, color="purple")
plt.show()
```
## Check best model output with the input image and label
```
model.load_state_dict(
torch.load(os.path.join(root_dir, "best_metric_model.pth"))
)
model.eval()
with torch.no_grad():
# select one image to evaluate and visualize the model output
val_input = val_ds[6]["image"].unsqueeze(0).to(device)
val_output = model(val_input)
plt.figure("image", (24, 6))
for i in range(4):
plt.subplot(1, 4, i + 1)
plt.title(f"image channel {i}")
plt.imshow(val_ds[6]["image"][i, :, :, 20].detach().cpu(), cmap="gray")
plt.show()
# visualize the 3 channels label corresponding to this image
plt.figure("label", (18, 6))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.title(f"label channel {i}")
plt.imshow(val_ds[6]["label"][i, :, :, 20].detach().cpu())
plt.show()
# visualize the 3 channels model output corresponding to this image
plt.figure("output", (18, 6))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.title(f"output channel {i}")
plt.imshow(val_output[0, i, :, :, 20].detach().cpu())
plt.show()
```
## Cleanup data directory
Remove the data directory if a temporary one was used.
```
if directory is None:
shutil.rmtree(root_dir)
```
```
#! /usr/bin/python2
from pyspark import SparkConf, SparkContext
sc = SparkContext(conf=SparkConf().setAppName("MyApp").setMaster("local[2]"))
import sys
import re
if sys.version_info[0] >= 3:
unicode = str
def parse_article(line):
try:
article_id, text = unicode(line.rstrip()).split('\t', 1)
text = re.sub(r"^\W+|\W+$", "", text, flags=re.UNICODE)
words = re.split(r"\W*\s+\W*", text, flags=re.UNICODE)
return words
except ValueError as e:
return []
def pairs_starting_from_word(words, first_word='word'):
pairs = []
for i, word in enumerate(words[:-1]):
if (word == first_word):
pair = '{}_{}'.format(word, words[i+1])
cnt = 1
pairs.append((pair, cnt))
else:
continue
return pairs
def create_bigrams(words):
bigrams = []
for i, word in enumerate(words[:-1]):
pair = u'_'.join((word, words[i+1]))#.encode('utf-8')
cnt = 1
bigrams.append((pair, cnt))
return bigrams
from math import log
def calc_npmi(pair, cnt, words_occurrences_dict, total_num_of_words, total_num_of_pairs):
    word1, word2 = pair.split('_')
    # float() keeps the divisions true divisions under Python 2 as well
    p_a = float(words_occurrences_dict[word1]) / total_num_of_words
    p_b = float(words_occurrences_dict[word2]) / total_num_of_words
    p_ab = float(cnt) / total_num_of_pairs      # joint probability of the pair
    pmi_a_b = log(p_ab / (p_a * p_b))           # pointwise mutual information
    npmi_a_b = pmi_a_b / -log(p_ab)             # normalized PMI, in [-1, 1]
    return (pair, npmi_a_b)
wiki = sc.textFile("/data/wiki/en_articles_part/articles-part", 16).map(parse_article)
with open('/datasets/stop_words_en.txt', 'r') as f:
stop_words = set(f.read().split())
#stop_words = map(str.lower, stop_words_1)
# lowercase all words
wiki_lower = wiki.map(lambda words: [x.lower() for x in words])
stop_words_lower = [x.lower() for x in stop_words]
# words not in stop_words_en.txt
wiki_filt = wiki_lower.map(lambda words: [x for x in words if x not in stop_words_lower])
# create bigrams
wiki_bigrams = wiki_filt.flatMap(create_bigrams)
# aggregate counters
wiki_red = wiki_bigrams.reduceByKey(lambda a, b: a + b)
# filter values by counter
wiki_red_filt = wiki_red.filter(lambda pair_cnt: pair_cnt[1] >= 500)
# total number of words
tot_num_words = wiki_filt.map(lambda words: len(words))
tot_num_words = tot_num_words.reduce(lambda a, b: a + b)
tot_num_words
# total number of words pairs
tot_num_pairs = wiki_filt.map(lambda words: len(words) - 1)
tot_num_pairs = tot_num_pairs.reduce(lambda a, b: a + b)
tot_num_pairs
# number of each word occurrences
words_occ = wiki_filt.flatMap(lambda words: [(x, 1) for x in words])
words_occ = words_occ.reduceByKey(lambda a, b: a + b)
#words_occ = words_occ.filter(lambda pair_cnt: pair_cnt[1] >= 500)
words_occ = words_occ.collect()
words_occ_dict = dict()
for item, cnt in words_occ:
words_occ_dict[item] = cnt
pairs_npmi = wiki_red_filt\
.map(lambda pair_cnt: calc_npmi(pair_cnt[0], pair_cnt[1], words_occ_dict, tot_num_words, tot_num_pairs))\
.map(lambda a_b: (a_b[1], a_b[0]))\
.sortByKey(False)\
.map(lambda a_b: (a_b[1], a_b[0]))\
.take(39)
#print(len(pairs_npmi))
for pair, npmi in pairs_npmi:
print (unicode(pair))
```
# Benchmarks
In this notebook we will try to identify the potential bottlenecks of the synchronization system, and show a series of benchmarks with different RDF datasets.
## Setup
As always, we will begin by adding the hercules_sync library to our path, and setting up the logging system.
```
import logging
import os
import sys
# set up module paths for imports
module_path = os.path.abspath(os.path.join('..'))
hercules_sync_path = os.path.abspath(os.path.join('..', 'hercules_sync'))
sys.path.append(module_path)
sys.path.append(hercules_sync_path)
# start logging system and set logging level
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
logging.info("Starting logger")
output_images_path = "img"
%load_ext snakeviz
import matplotlib.pyplot as plt
plt.style.use('seaborn-paper')
%matplotlib inline
```
## Datasets used
In order to execute our benchmarks we are going to use two different datasets:
* Real dataset: A DBpedia dataset with information about different people is going to be used to represent the performance of our system with a real dataset.
* Synthetic dataset: We are also going to use the Berlin SPARQL Benchmark tool to generate datasets of different sizes and observe the performance of our system as the size of the data to be synchronized increases.
We will begin by defining a set of functions that can be used to download the dbpedia dataset and obtain a subset of it:
```
import bz2
import urllib.request
def read_zipped_dataset(url, decompressor=bz2):
    response = urllib.request.urlopen(url)
    # use the decompressor argument instead of hard-coding bz2
    content = decompressor.decompress(response.read())
    return content
def get_first_lines(string, num_lines):
return b'\n'.join(string.split(b'\n')[:num_lines])
```
Now, we will use the functions defined above to load the person data from DBpedia. Three different datasets will be stored:
* dbpedia_dataset will contain the complete dataset
* dbpedia_dataset_preview will contain just the first 100 lines of the dataset. This subset will be used first to identify potential bottlenecks of the system.
* dbpedia_dataset_final will contain the first 5000 lines of the dataset. This subset will be used later on for our benchmarks with real data.
```
DBPEDIA_PERSONDATA_URL = 'http://downloads.dbpedia.org/3.4/en/persondata_en.nt.bz2'
NUM_TRIPLES_FINAL = 5000
NUM_TRIPLES_PREVIEW = 100
dbpedia_dataset = read_zipped_dataset(DBPEDIA_PERSONDATA_URL, bz2)
dbpedia_dataset_preview = get_first_lines(dbpedia_dataset, NUM_TRIPLES_PREVIEW)
dbpedia_dataset_final = get_first_lines(dbpedia_dataset, NUM_TRIPLES_FINAL)
```
Now, we will proceed to create the synthetic datasets. We will first create a function that calls the BSBM tool to generate a synthetic dataset with the given number of products:
```
import subprocess
OUTPUT_FILE_NAME = "dataset.nt"
def gen_synthetic_data(num_products=100, bsbmtools_dir="bsbmtools-0.2"):
subprocess.call(f"cd {bsbmtools_dir} && ./generate -fc -pc {num_products}", shell=True)
with open(os.path.join(bsbmtools_dir, OUTPUT_FILE_NAME), 'r') as f:
res = f.read()
return res
```
Now we will call the function multiple times to produce files of different sizes to evaluate our system.
```
data = [gen_synthetic_data(i) for i in range(1, 47, 5)]
```
Finally, we will convert the previous list to a dict where the keys are the number of triples in the file, and the value is the file itself. This will be useful later on to plot the results of our benchmarks.
```
from rdflib.graph import Graph
def count_triples(string):
graph = Graph().parse(format='n3', data=string)
return len(graph)
synthetic_data_final = {count_triples(string): string for string in data}
synthetic_data_final.keys()
```
## Analysing bottlenecks of the application
In this section we are going to execute an initial dump of the DBpedia preview dataset to identify which are the parts of the synchronization that take the most amount of time to execute. First of all, we are going to define the algorithm to use, and to reset the URIs factory to an initial state:
```
from wbsync.external.uri_factory import URIFactoryMock
from wbsync.synchronization import GraphDiffSyncAlgorithm, OntologySynchronizer
algorithm = GraphDiffSyncAlgorithm()
synchronizer = OntologySynchronizer(algorithm)
factory = URIFactoryMock()
factory.reset_factory()
```
Now, we will define a function to completely reset the state of the Wikibase instance. Since we are going to make several dumps of data to the Wikibase in this notebook, it is important to reset its state before adding more data. In this case we are connecting to the machine where the Wikibase is running (in docker containers), and calling a script that resets the docker volumes:
```
import time
from wbsync.triplestore import WikibaseAdapter
from secret import SSH_USER, SSH_PASS, USERNAME, PASSWORD
wikibase_host = '156.35.94.149'
ssh_port = '22'
mediawiki_api_url = f'http://{wikibase_host}:8181/w/api.php'
sparql_endpoint_url = f'http://{wikibase_host}:8282/proxy/wdqs/bigdata/namespace/wdq/sparql'
adapter = None
def reset_wb_state(factory):
global adapter
print("Resetting Wikibase state...")
subprocess.call('cd ~/wikibase-docker && sh clean_restart.sh', shell=True)
print("Wikibase state has been reset")
factory.reset_factory()
time.sleep(15) # wait some time for docker containers to go up again
print("Logging to wikibase...")
adapter = WikibaseAdapter(mediawiki_api_url, sparql_endpoint_url, USERNAME, PASSWORD)
```
Now we will define the function to be benchmarked. This function executes the synchronization of the given data to Wikibase:
```
def execute_synchronization(source_content, target_content, synchronizer, adapter):
ops = synchronizer.synchronize(source_content, target_content)
for op in ops:
res = op.execute(adapter)
if not res.successful:
print(f"Error synchronizing triple: {res.message}")
```
Finally, we will run the function and obtain some visualizations about its performance using the snakeviz tool:
```
reset_wb_state(factory)
%snakeviz execute_synchronization("", dbpedia_dataset_preview, synchronizer, adapter)
```
As we can see above, more than half of the synchronization time is spent in the write method of wdi_core. In the following sections we will try to propose some solutions to alleviate this issue.
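If you want to capture the same profile outside of Jupyter (without the `%snakeviz` magic), a roughly equivalent approach is plain `cProfile` plus `pstats`; the output file name below is arbitrary:
```
import cProfile
import pstats

# profile the same call and dump the raw stats to a file (name is arbitrary)
cProfile.run('execute_synchronization("", dbpedia_dataset_preview, synchronizer, adapter)',
             "sync_profile.out")

# print the 10 most expensive functions by cumulative time
stats = pstats.Stats("sync_profile.out")
stats.sort_stats("cumulative").print_stats(10)
```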
## Batch vs Basic operations
As we have seen in the previous section, the writing of triples to Wikibase is the main bottleneck of the system. In order to alleviate this problem, the 'optimize_ops' function is provided to convert a list of BasicOperations into BatchOperations.
With this optimization the system will try to perform fewer writing operations, using instead the 'update' methods provided by WikidataIntegrator. In this section we are going to compare the performance of the basic operations against the 'optimized' version. First of all, we are going to define the functions needed to execute both types of synchronization:
```
from wbsync.synchronization.operations import optimize_ops
def _synchronize(source_content, target_content, synchronizer, adapter, ops_callback):
ops = ops_callback(source_content, target_content)
for op in ops:
res = op.execute(adapter)
if not res.successful:
print(f"Error synchronizing triple: {res.message}")
def execute_basic_synchronization(source_content, target_content, synchronizer, adapter):
    # the callback must accept both contents, since _synchronize calls it with two arguments
    ops_callback = lambda source, target: synchronizer.synchronize(source, target)
    return _synchronize(source_content, target_content, synchronizer, adapter, ops_callback)
def execute_batch_synchronization(source_content, target_content, synchronizer, adapter):
    ops_callback = lambda source, target: optimize_ops(synchronizer.synchronize(source, target))
    return _synchronize(source_content, target_content, synchronizer, adapter, ops_callback)
```
The first auxiliary function, '\_synchronize', is very similar to the synchronization function defined previously to profile the preview dataset. However, in this case we are receiving an additional callback to obtain the list of operations from the file.
After that we define two more functions. The first one corresponds to the basic approach used so far, where the operations are obtained directly from our OntologySynchronizer instance. The second function will additionally call the _optimize\_ops_ function on the operations to optimize them.
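To make the intuition behind the optimization more concrete, here is a purely illustrative sketch of the kind of grouping a batch optimizer can perform. The `subject` attribute and the `group_by_subject` helper are hypothetical, not the actual `optimize_ops` implementation; they only show why grouping reduces the number of write calls.
```
from collections import defaultdict

def group_by_subject(operations):
    """Illustrative only: bundle operations that touch the same subject,
    so each subject results in a single write instead of one write per triple."""
    grouped = defaultdict(list)
    for op in operations:
        grouped[op.subject].append(op)  # 'subject' is a hypothetical attribute
    return list(grouped.values())
```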
We will now declare a basic function to execute the synchronization on the synthetic dataset and time it:
```
import timeit
from functools import partial
def time_synth_dataset(callback, setup, dataset_dict):
results = {}
for num_triples, data in dataset_dict.items():
setup()
print(f"Execute callback: {num_triples} number of triples")
elapsed_time = timeit.timeit(partial(callback, data), number=1)
print(f"Callback finished in {elapsed_time} seconds")
results[num_triples] = elapsed_time
return results
```
With the function defined, we will measure both the times to synchronize the data with batch and base operations:
```
reset_state_callback = partial(reset_wb_state, factory=factory)
results_batch = time_synth_dataset(lambda f: execute_batch_synchronization("", f, synchronizer, adapter),
reset_state_callback, synthetic_data_final)
results_base = time_synth_dataset(lambda f: execute_basic_synchronization("", f, synchronizer, adapter),
reset_state_callback, synthetic_data_final)
print("Synthetic dataset")
print("-" * 25)
print("Batch times: ", results_batch)
print("Base times: ", results_base)
```
We can see that the batch updates perform considerably better: on average, executing the synchronization with batch operations is about 3 times faster than using the basic approach.
Finally, we will plot the results that we have obtained to illustrate this better:
```
# line chart
def plot_synth_results(results_dict_base, results_dict_batch):
x1, y1 = results_dict_base.keys(), results_dict_base.values()
x2, y2 = results_dict_batch.keys(), results_dict_batch.values()
plt.figure(figsize=(15, 7))
plt.xlabel("Number of triples")
plt.ylabel("time (s)")
plt.title("Synchronization time with and without batch operations")
plt.plot(list(x1), list(y1), label="Base sync")
plt.plot(list(x2), list(y2), label="Batch sync")
plt.legend(loc="upper left")
plt.savefig(os.path.join(output_images_path, "linechart.png"), dpi=300, transparent=True)
plt.show()
plot_synth_results(results_base, results_batch)
```
Now let's see the results obtained with the real DBpedia dataset:
```
reset_wb_state(factory)
results_dbpedia_base = timeit.timeit(partial(execute_basic_synchronization, "", dbpedia_dataset_final, synchronizer, adapter), number=1)
reset_wb_state(factory)
results_dbpedia_batch = timeit.timeit(partial(execute_batch_synchronization, "", dbpedia_dataset_final, synchronizer, adapter), number=1)
print("Real dataset (DBpedia)")
print("-" * 35)
print(f"Batch times: {results_dbpedia_batch}s")
print(f"Base times: {results_dbpedia_base}s")
```
We can see that the results are comparable to the ones obtained with the synthetic dataset. In this case, the batch synchronization is around 2.5 times faster than the base one.
To conclude this notebook, we will plot the results in a bar chart to illustrate this difference in performance:
```
# bar chart
def plot_dbpedia_results(time_base, time_batch):
x = ['Basic', 'Batch']
y = [time_base, time_batch]
x_pos = [i for i, _ in enumerate(x)]
plt.figure(figsize=(7, 7))
barlist = plt.bar(x_pos, y, width=0.8)
barlist[0].set_color('C0')
barlist[1].set_color('C1')
plt.xlabel("Synchronization type")
plt.ylabel("time (s)")
plt.title("Synchronization time with and without batch operations")
plt.xticks(x_pos, x)
    plt.ylim(bottom=0)  # call ylim() instead of overwriting it; anchor the y-axis at zero
plt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')
plt.savefig(os.path.join(output_images_path, "barchart.png"), dpi=300, transparent=True)
plt.show()
plot_dbpedia_results(results_dbpedia_base, results_dbpedia_batch)
```
# Transformations

## 1. Custom functions
Examples: the functions `round`, `len`, `random`, `open`.
* A function is a fairly fundamental concept in Python and in programming in general.
* We have already met plenty of them, both built into Python and coming from libraries (`requests`, `pandas`, ...).
* It is possible to define your own functions.
* A function lets you define a block of code that performs a specific, repeated task.
* The exact behaviour of a function depends on its input parameters.
* It returns a value as its result.

We will borrow an idea from kodim.cz for a function that computes the area of a triangle.
```
height1 = 3
width1 = 6
area1 = (height1 * width1) / 2
print(area1)
height2 = 4
width2 = 3
area2 = (height2 * width2) / 2
print(area2)
```
If we need to compute the area of many triangles, it quickly becomes tedious to write out the same code over and over. Moreover, when copying the same thing repeatedly, it is easy to make a mistake.
Let's define a function that receives the dimensions of a triangle as parameters and returns its area as the result.
```
def triangle_area(height, width):
return (height * width) / 2
area1 = triangle_area(3, 6)
print(area1)
area2 = triangle_area(4, 3)
print(area2)
```
The format of a function definition is as follows.
```
def <function_name>(<parameters>):
    <arbitrary code>
```
* Function names follow the same rules as variable names.
* A function can have any number of parameters. If it has none, we leave the parentheses empty (but they must be there).
* The body of the function (indented under its name and parameters) can contain arbitrary code.
* Variables defined inside a function exist only there; they are not visible from the outside.
* If the function returns a result, we end it with `return <result>` (see the small sketch below for a function with no parameters and no return value).
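As a small illustration of the last two rules, here is a function with no parameters and no `return`; such a function implicitly returns `None`:
```
def say_hello():
    print("Hello!")

result = say_hello()   # prints "Hello!"
print(result)          # None, because the function has no return statement
```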
### Sum of positive values
```
def sum_positive(values):
result = 0
for value in values:
if value > 0:
result += value
return result
test1 = [1, 2, 3, 4, 5]
python_sum1 = sum(test1)
my_sum1 = sum_positive(test1)
print(f"Python sum: {python_sum1}")
print(f"My sum: {my_sum1}")
test2 = [-1, 2, 3, -4, 5]
python_sum2 = sum(test2)
my_sum2 = sum_positive(test2)
print(f"Python sum: {python_sum2}")
print(f"My sum: {my_sum2}")
```
### Writing to CSV
A function without a return value. Beware, it is very simplified; in practice you should use pandas or Python's csv module for working with CSV files (a small sketch with the csv module follows the example below).
```
def write_to_csv(path, nested_list):
f = open(path, mode="w", encoding="utf-8")
for inner_list in nested_list:
str_list = [str(v) for v in inner_list]
f.write(",".join(str_list))
f.write("\n")
f.close()
data = [["hello", "python"], ["goodbye", "python"], [123, 456]]
write_to_csv("test.csv", data)
!cat test.csv
```
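As mentioned above, Python's built-in `csv` module is the safer choice in practice, because it handles quoting and separators inside values for us. A minimal sketch of the same helper using `csv.writer` could look like this:
```
import csv

def write_to_csv_stdlib(path, nested_list):
    # newline="" is recommended by the csv docs to avoid blank lines on Windows
    with open(path, mode="w", encoding="utf-8", newline="") as f:
        writer = csv.writer(f)
        writer.writerows(nested_list)

write_to_csv_stdlib("test_stdlib.csv", data)
```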
### Converting school grades
Functions can also make code more readable. We will reuse the earlier example of converting grades from letters to numbers.
```
grades = ["A", "A", "B", "A", "C", "D", "E", "C", "B", "A", "C"]
converted1 = [grade.replace("A", "1").replace("B", "2").replace("C", "3").replace("D", "4").replace("E", "5")
for grade in grades]
print(converted1)
def convert_grade(grade):
return grade.replace("A", "1").replace("B", "2").replace("C", "3").replace("D", "4").replace("E", "5")
converted2 = [convert_grade(grade) for grade in grades]
print(converted2)
def convert_grade_better(grade):
if grade == "A":
return "1"
elif grade == "B":
return "2"
elif grade == "C":
return "3"
elif grade == "D":
return "4"
else:
return "5"
converted3 = [convert_grade_better(grade) for grade in grades]
print(converted3)
```
## Exercise
## 2. Data transformation in Pandas
We will try to convert badly formatted data into a shape that is easier to work with. This is something we deal with all the time in practice -- we get a mountain of data, know nothing about it, and need to bring order to it.
We will work with data about Kristián's weight over the 14 days during which he tried to lose weight.
```
!cat vaha.txt
import pandas
vaha = pandas.read_csv("vaha.txt", encoding="utf-8", sep="\t")
vaha
vaha.dtypes
```
Conclusion: Kristián is a bit sloppy -- this is what data looks like in practice.
First we transform the `den` (day) column into the day number. The `den` column is always stored as `two_letter_day_name day_number`, and some of the numbers also end with a period.
### Dropping the day names
```
dny = vaha["den"]
dny
cislo_dne = dny.str[3:]
cislo_dne
```
### Dropping the periods
```
cislo_dne = cislo_dne.str.replace(".", "", regex=False)  # literal dot, not a regular expression
cislo_dne
```
### Converting to numbers
```
cislo_dne = pandas.to_numeric(cislo_dne)
cislo_dne
```
### Also saving the day name
```
dny
nazev_dne = dny.str[:2]
nazev_dne
```
### Storing both in the dataframe
```
vaha.drop("den", axis="columns", inplace=True)
vaha
vaha["číslo_dne"] = cislo_dne
vaha["název_dne"] = nazev_dne
vaha
vaha.dtypes
```
With that, the day information is stored in a reasonable form.
## 3. Crunching a Series
Here we will transform the `váha` (weight) column.
```
vaha
```
As kodim.cz puts it: *If we manage to split the value on a space, we can convert the first part to a number. If that fails, we can split on the letter 'k'.*
We will define this procedure as a function and apply it to the `váha` column element by element.
```
def prevod_vahy(vaha):
casti = vaha.split(" ")
if len(casti) < 2:
casti = vaha.split("k")
desetinna_tecka = casti[0].replace(",", ".")
return float(desetinna_tecka)
```
We now pass the function we defined as a parameter to the `apply` method called on the `váha` column. `apply` takes our function and applies it to every element of the column. The result is a column of our function's return values.
```
vaha["váha"] = vaha["váha"].apply(prevod_vahy)
vaha
vaha.dtypes
```
It would also be nice to clean up the `běh` (running) column.
```
def prevod_behu(beh):
if beh == "pauza" or beh == "paza":
return 0
elif pandas.isnull(beh):
return beh
else:
casti = beh.split(" ")
if len(casti) < 2:
casti = beh.split("k")
return float(casti[0])
vaha["běh"] = vaha["běh"].apply(prevod_behu)
vaha
vaha.dtypes
```
## 4. Custom aggregation functions
We will define our own aggregation function, similar to the `mean`, `std` and other functions we saw last time, called on the result of a `groupby` operation.
We group the data by week and compute some function of the weight within each week. Here, for example, the average weight per week.
```
vaha.groupby("týden")["váha"].mean()
```
We will compute the so-called spread -- the difference between the maximum and minimum value. Pandas does not offer such a function, so we have to write it ourselves.
The function receives a pandas Series (a column) as its argument and returns its spread.
```
def spread(series):
return series.max() - series.min()
```
First we apply the function to a single column. That gives us the spread of the weights over the whole observed period.
```
vaha["váha"].agg(spread)
```
And now on the groupby object -- we get the spread of the weights for each week.
```
vaha.groupby("týden")["váha"].agg(spread)
```
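One nice property of `agg` is that it also accepts a list of functions, so several aggregations can be computed at once; a minimal example combining built-in aggregations with our `spread` function:
```
vaha.groupby("týden")["váha"].agg(["min", "max", "mean", spread])
```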
### Analytics Vidhya: Practice Problem (Approach)
```
import os
import re
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
!ls /home/pratos/Side-Project/av_articles/data/
```
Download the training & test data from the Practice Problem page. We'll do a quick bit of investigation on the dataset:
```
data = pd.read_csv('../data/training.csv')
data.head()
print("Shape of the data is:{}".format(data.shape))
print("List of columns is: {}".format(list(data.columns)))
```
Here, `Loan_Status` is our `target variable` and the rest are `predictor variables`. `Loan_ID` wouldn't help much in making predictions about `defaulters`, hence we won't be considering that variable in our final model.
Finding out the `null/Nan` values in the columns:
```
for _ in data.columns:
print("The number of null values in:{} == {}".format(_, data[_].isnull().sum()))
```
We'll check out the values (labels) for the columns having missing values:
```
missing_pred = ['Dependents', 'Self_Employed', 'Loan_Amount_Term', 'Gender', 'Married']
for _ in missing_pred:
print("List of unique labels for {}:::{}".format(_, set(data[_])))
```
For the rest of the missing values:
- `Dependents`: Assumption that there are no dependents
- `Self_Employed`: Assumption that the applicant is not self-employed
- `Loan_Amount_Term`: Assumption that the loan amount term is the mean value (which is what the code below imputes)
- `Credit_History`: Assumption that the person has a credit history
- `Married`: If nothing specified, applicant is not married
- `Gender`: Assuming the gender is Male for the missing values
Before that, we'll split the dataset into train and test sets.
```
from sklearn.model_selection import train_test_split
list(data.columns)
pred_var = ['Gender','Married','Dependents','Education','Self_Employed','ApplicantIncome','CoapplicantIncome',\
'LoanAmount','Loan_Amount_Term','Credit_History','Property_Area']
X_train, X_test, y_train, y_test = train_test_split(data[pred_var], data['Loan_Status'], \
test_size=0.25, random_state=42)
```
We'll go through the `pre-processing` steps that we will later wrap into a custom `estimator`.
```
X_train['Dependents'] = X_train['Dependents'].fillna('0')
X_train['Self_Employed'] = X_train['Self_Employed'].fillna('No')
X_train['Loan_Amount_Term'] = X_train['Loan_Amount_Term'].fillna(X_train['Loan_Amount_Term'].mean())
X_train['Credit_History'] = X_train['Credit_History'].fillna(1)
X_train['Married'] = X_train['Married'].fillna('No')
X_train['Gender'] = X_train['Gender'].fillna('Male')
X_train['LoanAmount'] = X_train['LoanAmount'].fillna(X_train['LoanAmount'].mean())
```
We encounter a lot of `string` labels in the `Gender`, `Married`, `Education`, `Self_Employed` & `Property_Area` columns.
```
label_columns = ['Gender', 'Married', 'Education', 'Self_Employed', 'Property_Area', 'Dependents']
for _ in label_columns:
print("List of unique labels {}:{}".format(_, set(X_train[_])))
gender_values = {'Female' : 0, 'Male' : 1}
married_values = {'No' : 0, 'Yes' : 1}
education_values = {'Graduate' : 0, 'Not Graduate' : 1}
employed_values = {'No' : 0, 'Yes' : 1}
property_values = {'Rural' : 0, 'Urban' : 1, 'Semiurban' : 2}
dependent_values = {'3+': 3, '0': 0, '2': 2, '1': 1}
X_train.replace({'Gender': gender_values, 'Married': married_values, 'Education': education_values, \
'Self_Employed': employed_values, 'Property_Area': property_values, 'Dependents': dependent_values}\
, inplace=True)
X_train.head()
X_train.dtypes
for _ in X_train.columns:
print("The number of null values in:{} == {}".format(_, X_train[_].isnull().sum()))
```
Converting the pandas dataframes to numpy arrays:
```
X_train = X_train.as_matrix()  # note: as_matrix() was removed in newer pandas; .to_numpy() is the modern equivalent
X_train.shape
```
We'll create a custom `pre-processing estimator` that will help us write cleaner pipelines and make future deployments easier:
```
from sklearn.base import BaseEstimator, TransformerMixin
class PreProcessing(BaseEstimator, TransformerMixin):
"""Custom Pre-Processing estimator for our use-case
"""
def __init__(self):
pass
def transform(self, df):
"""Regular transform() that is a help for training, validation & testing datasets
(NOTE: The operations performed here are the ones that we did prior to this cell)
"""
pred_var = ['Gender','Married','Dependents','Education','Self_Employed','ApplicantIncome','CoapplicantIncome',\
'LoanAmount','Loan_Amount_Term','Credit_History','Property_Area']
df = df[pred_var]
df['Dependents'] = df['Dependents'].fillna(0)
df['Self_Employed'] = df['Self_Employed'].fillna('No')
df['Loan_Amount_Term'] = df['Loan_Amount_Term'].fillna(self.term_mean_)
df['Credit_History'] = df['Credit_History'].fillna(1)
df['Married'] = df['Married'].fillna('No')
df['Gender'] = df['Gender'].fillna('Male')
df['LoanAmount'] = df['LoanAmount'].fillna(self.amt_mean_)
gender_values = {'Female' : 0, 'Male' : 1}
married_values = {'No' : 0, 'Yes' : 1}
education_values = {'Graduate' : 0, 'Not Graduate' : 1}
employed_values = {'No' : 0, 'Yes' : 1}
property_values = {'Rural' : 0, 'Urban' : 1, 'Semiurban' : 2}
dependent_values = {'3+': 3, '0': 0, '2': 2, '1': 1}
df.replace({'Gender': gender_values, 'Married': married_values, 'Education': education_values, \
'Self_Employed': employed_values, 'Property_Area': property_values, \
'Dependents': dependent_values}, inplace=True)
return df.as_matrix()
def fit(self, df, y=None, **fit_params):
"""Fitting the Training dataset & calculating the required values from train
e.g: We will need the mean of X_train['Loan_Amount_Term'] that will be used in
transformation of X_test
"""
self.term_mean_ = df['Loan_Amount_Term'].mean()
self.amt_mean_ = df['LoanAmount'].mean()
return self
```
To make sure that this works, let's do a test run for it:
```
X_train, X_test, y_train, y_test = train_test_split(data[pred_var], data['Loan_Status'], \
test_size=0.25, random_state=42)
X_train.head()
for _ in X_train.columns:
print("The number of null values in:{} == {}".format(_, X_train[_].isnull().sum()))
preprocess = PreProcessing()
preprocess
preprocess.fit(X_train)
X_train_transformed = preprocess.transform(X_train)
X_train_transformed.shape
```
So our small experiment with writing a custom `estimator` worked. This will be helpful later on.
```
X_test_transformed = preprocess.transform(X_test)
X_test_transformed.shape
y_test = y_test.replace({'Y':1, 'N':0}).as_matrix()
y_train = y_train.replace({'Y':1, 'N':0}).as_matrix()
param_grid = {"randomforestclassifier__n_estimators" : [10, 20, 30],
"randomforestclassifier__max_depth" : [None, 6, 8, 10],
"randomforestclassifier__max_leaf_nodes": [None, 5, 10, 20],
"randomforestclassifier__min_impurity_split": [0.1, 0.2, 0.3]}
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
pipe = make_pipeline(PreProcessing(),
RandomForestClassifier())
pipe
from sklearn.model_selection import train_test_split, GridSearchCV
grid = GridSearchCV(pipe, param_grid=param_grid, cv=3)
grid
X_train, X_test, y_train, y_test = train_test_split(data[pred_var], data['Loan_Status'], \
test_size=0.25, random_state=42)
grid.fit(X_train, y_train)
print("Best parameters: {}".format(grid.best_params_))
print("Test set score: {:.2f}".format(grid.score(X_test, y_test)))
```
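Since the whole point of wrapping the pre-processing into an estimator is to reuse it in future deployments, a minimal sketch of persisting the fitted grid-search object with `joblib` might look like this (the file name is only an example):
```
from joblib import dump, load

# persist the fitted pipeline (pre-processing + model) to disk
dump(grid, "loan_model.joblib")

# later, e.g. inside an API service, reload it and predict on raw dataframes
# (the PreProcessing class must be importable when loading)
loaded_grid = load("loan_model.joblib")
predictions = loaded_grid.predict(X_test)
```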
## Logistic Regression
Credits: notebook based on A. Géron, *Hands-On Machine Learning with Scikit-Learn and TensorFlow: Concepts, Tools, and Techniques to Build Intelligent Systems*.
The Logistic Regression model computes a weighted sum of the input features and outputs the logistic of the result.


Once the model has estimated the probability of belonging to the positive class, it can make a prediction: if the estimated probability is greater than or equal to 0.5, the instance is classified as 1, otherwise as 0.
Training: the goal of training is for the model to estimate high probabilities for positive instances and low probabilities for negative ones. Training minimizes the following cost function:


```
import numpy as np
class LogisticClassifier:
    def __init__(self, lr=1e-4, n_iters=10000):
        self.is_fitted = False
        self.theta = None          # parameters (bias + weights), set by fit()
        self.n_iters = n_iters     # number of gradient-descent iterations
        self.lr = lr               # learning rate

    @staticmethod
    def logistic(X):
        # sigmoid: maps any real value into (0, 1)
        return 1 / (1 + np.exp(-X))

    def fit(self, X, y):
        self.is_fitted = True
        n_samples, n_features = X.shape
        self.theta = np.random.randn(n_features + 1, 1)    # +1 for the bias term, column vector
        X = np.c_[np.ones((n_samples, 1)), X]               # prepend a column of ones for the bias
        for _ in range(self.n_iters):
            # batch gradient descent on the log-loss
            gradients = (1 / n_samples) * X.T.dot(self.logistic(X.dot(self.theta)) - y)
            self.theta -= self.lr * gradients

    def predict(self, X):
        if self.is_fitted:
            X = np.c_[np.ones((X.shape[0], 1)), X]
            dot = X.dot(self.theta)
            # theta^T x >= 0  <=>  sigmoid(theta^T x) >= 0.5
            return (dot >= 0).astype(int)   # np.int was removed from NumPy; builtin int is equivalent
        else:
            print("Model must be fitted before prediction")

    def predict_proba(self, X):
        if self.is_fitted:
            X = np.c_[np.ones((X.shape[0], 1)), X]
            return self.logistic(X.dot(self.theta))
        else:
            print("Model must be fitted before prediction")
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
iris=datasets.load_iris()
X=iris['data'][:,3:]
y=(iris['target']==2).astype(int)   # 1 for Iris virginica, 0 otherwise (np.int was removed from NumPy)
y=y.reshape((-1,1))
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=10)
log_clf = LogisticClassifier(n_iters=1000000)
log_clf.fit(X_train, y_train)
log_clf.theta
y_predicted=log_clf.predict(X_val)
accuracy_score(y_predicted,y_val)
import matplotlib.pyplot as plt
def plot_decision_boundaries(clf,X,labels,linewidth=2,fontsize=15):
if X.shape[1]==1:
X_new=np.linspace(X.min(),X.max(),num=len(X)*2 ).reshape(-1, 1)
y_proba=clf.predict_proba(X_new)
boundary= X_new[y_proba[:, 0] >= 0.5][0]
fig = plt.figure(figsize=(12, 6))
plt.plot(X_new,y_proba[:,0],'b-',linewidth=linewidth,label=labels['positive'])
plt.plot(X_new,(1-y_proba[:,0]),'r--',linewidth=linewidth,label=labels['negative'] )
plt.axvline(x=boundary)
plt.ylim([0, 1])
plt.xlabel(labels.get('x_label','X'),fontsize=fontsize )
plt.ylabel( labels.get('y_label','Y') ,fontsize=fontsize)
plt.legend(loc="center left", fontsize=fontsize)
plt.show()
else:
print("Incorrect shape of the dataset, cannot plot decision boundary")
labels={'positive':'Iris virginica','negative':'Not Iris virginica','x_label':'Petal Width','y_label':'Probability'}
plot_decision_boundaries(log_clf,X,labels=labels)
X = iris["data"][:, (2, 3)]
y = (iris["target"] == 2).astype(np.int)
y=y.reshape((-1,1))
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=10)
log_clf = LogisticClassifier(n_iters=1000000)
log_clf.fit(X_train, y_train)
log_clf.theta
y_predicted=log_clf.predict(X_val)
accuracy_score(y_predicted,y_val)
```
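A quick sanity check (a sketch, not part of the original notebook): the from-scratch classifier can be compared against scikit-learn's `LogisticRegression` on the same split. The accuracies should be close, though not necessarily identical, since the scikit-learn model applies L2 regularization by default while the scratch version does not.

```
from sklearn.linear_model import LogisticRegression

sk_clf = LogisticRegression()            # L2-regularized by default, unlike the scratch implementation
sk_clf.fit(X_train, y_train.ravel())     # scikit-learn expects a 1-D target array
sk_pred = sk_clf.predict(X_val)

print("scikit-learn accuracy:", accuracy_score(y_val.ravel(), sk_pred))
print("from-scratch accuracy:", accuracy_score(y_val.ravel(), log_clf.predict(X_val).ravel()))
```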