| code (string, 2.5k-6.36M chars) | kind (string, 2 classes) | parsed_code (string, 0-404k chars) | quality_prob (float64, 0-0.98) | learning_prob (float64, 0.03-1) |
---|---|---|---|---|
```
import pandas as pd
import numpy as np
df_user_log = pd.read_csv("data_set/user_behavior_time_resampled.csv")
df_user_info = pd.read_csv("data_set/user_info.csv")
df_vip_user = pd.read_csv("data_set/vip_user.csv")
# Insert four columns (click, cart, order, fav) for click, add-to-cart, purchase, and favorite actions
# See the mapping rules described in the text above
df_user_log["click"] = df_user_log.action_type.apply(lambda l:1 if l==0 else 0)
df_user_log["cart"] = df_user_log.action_type.apply(lambda l:1 if l==1 else 0)
df_user_log["order"] = df_user_log.action_type.apply(lambda l:1 if l==2 else 0)
df_user_log["fav"] = df_user_log.action_type.apply(lambda l:1 if l==3 else 0)
# Inspect the user_log table after adding the new columns
df_user_log
print("click distribution:\n",df_user_log.click.value_counts())
print("order distribution:\n",df_user_log.order.value_counts())
print("fav distribution:\n",df_user_log.fav.value_counts())
print("cart distribution:\n",df_user_log.cart.value_counts())
df_clean = df_user_log[["item_id", "click", "cart", "order","fav"]]
df_clean
df_item = df_clean.groupby(["item_id"]).sum()
df_item
df_vip_user
# Rename merchant_id to seller_id
df_vip_user = df_vip_user.rename( columns={"merchant_id" : "seller_id"})
# Select the seller_id and label columns, then group by seller_id and sum the label values of records with the same seller_id
df_brand_vip_users = df_vip_user[["seller_id", "label"]].groupby("seller_id").sum()
# Inspect the table that stores the number of VIP users per shop
df_brand_vip_users
# Select the seller_id and item_id columns
df_seller_item_count = df_user_log[["seller_id", "item_id"]]
# Drop duplicates by item_id; records with the same item_id always share the same seller_id, so seller_id is unaffected
df_seller_item_count = df_seller_item_count.drop_duplicates("item_id")
# Set the item_id column to 1 to make summing straightforward
df_seller_item_count["item_id"] = 1
# Group by seller_id and sum the item_id column
df_seller_item_count = df_seller_item_count.groupby("seller_id").sum()
# Rename item_id to item_count to avoid ambiguity
df_seller_item_count = df_seller_item_count.rename(columns = {"item_id" : "item_count"})
# Inspect the result
df_seller_item_count
df_item
# Take item_id and seller_id from the raw behavior table to form a new table
df_brand_item_map = df_user_log[["item_id", "seller_id"]]
# Drop duplicates by item_id; the result is effectively an item_id to seller_id mapping
df_brand_item_map = df_brand_item_map.drop_duplicates("item_id")
# Merge df_brand_item_map into df_item, keyed on item_id
df_item = df_item.merge(df_brand_item_map, how="left", on="item_id")
# Inspect the updated item feature table
df_item
df_brand_item_map
# Join the shop VIP-user feature table onto the item feature table, keyed on seller_id
df_item = df_item.merge(df_brand_vip_users,how = "left", on="seller_id")
df_item
# Fill missing values with 0
df_item["label"] = df_item["label"].fillna(0)
df_item
df_item.isnull().sum()
df_item = df_item.dropna()
df_item = df_item.merge(df_seller_item_count, how = "left", on = "seller_id")
df_item
df_item
# Import the train/test split helper
from sklearn.model_selection import train_test_split
# Feature (independent variable) table
X = df_item.drop(columns=["order", "seller_id","item_id","label", "item_count"])
# Target (dependent variable)
y = df_item["order"]
# Split into training and test sets, with 20% held out for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state=42)
# Inspect the training-set features
X_train
y
X_train
X_train.isnull().sum()
# Import the linear regression model and the MAE metric
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
linear_model = LinearRegression()
linear_model.fit(X_train, y_train)
# Predict the y values for X_test
y_pred = linear_model.predict(X_test)
# Use the score method to see how well the model explains the target (R^2 on the test set)
print("Scores:", linear_model.score(X_test, y_test))
# Mean absolute error between the predicted y and y_test
print("MAE:", mean_absolute_error(y_test, y_pred))
# Inspect the model intercept (b)
print("intercept:", linear_model.intercept_)
# Inspect the model coefficients
print("coef_:", linear_model.coef_)
# Predict for one sample of [click, cart, fav], the three features the model was trained on
print(linear_model.predict([[20, 0, 3]]))
```
|
github_jupyter
|
import pandas as pd
import numpy as np
df_user_log = pd.read_csv("data_set/user_behavior_time_resampled.csv")
df_user_info = pd.read_csv("data_set/user_info.csv")
df_vip_user = pd.read_csv("data_set/vip_user.csv")
# Insert four columns (click, cart, order, fav) for click, add-to-cart, purchase, and favorite actions
# See the mapping rules described in the text above
df_user_log["click"] = df_user_log.action_type.apply(lambda l:1 if l==0 else 0)
df_user_log["cart"] = df_user_log.action_type.apply(lambda l:1 if l==1 else 0)
df_user_log["order"] = df_user_log.action_type.apply(lambda l:1 if l==2 else 0)
df_user_log["fav"] = df_user_log.action_type.apply(lambda l:1 if l==3 else 0)
# Inspect the user_log table after adding the new columns
df_user_log
print("click distribution:\n",df_user_log.click.value_counts())
print("order distribution:\n",df_user_log.order.value_counts())
print("fav distribution:\n",df_user_log.fav.value_counts())
print("cart distribution:\n",df_user_log.cart.value_counts())
df_clean = df_user_log[["item_id", "click", "cart", "order","fav"]]
df_clean
df_item = df_clean.groupby(["item_id"]).sum()
df_item
df_vip_user
# Rename merchant_id to seller_id
df_vip_user = df_vip_user.rename( columns={"merchant_id" : "seller_id"})
# Select the seller_id and label columns, then group by seller_id and sum the label values of records with the same seller_id
df_brand_vip_users = df_vip_user[["seller_id", "label"]].groupby("seller_id").sum()
# Inspect the table that stores the number of VIP users per shop
df_brand_vip_users
# Select the seller_id and item_id columns
df_seller_item_count = df_user_log[["seller_id", "item_id"]]
# Drop duplicates by item_id; records with the same item_id always share the same seller_id, so seller_id is unaffected
df_seller_item_count = df_seller_item_count.drop_duplicates("item_id")
# Set the item_id column to 1 to make summing straightforward
df_seller_item_count["item_id"] = 1
# Group by seller_id and sum the item_id column
df_seller_item_count = df_seller_item_count.groupby("seller_id").sum()
# Rename item_id to item_count to avoid ambiguity
df_seller_item_count = df_seller_item_count.rename(columns = {"item_id" : "item_count"})
# Inspect the result
df_seller_item_count
df_item
# Take item_id and seller_id from the raw behavior table to form a new table
df_brand_item_map = df_user_log[["item_id", "seller_id"]]
# Drop duplicates by item_id; the result is effectively an item_id to seller_id mapping
df_brand_item_map = df_brand_item_map.drop_duplicates("item_id")
# Merge df_brand_item_map into df_item, keyed on item_id
df_item = df_item.merge(df_brand_item_map, how="left", on="item_id")
# Inspect the updated item feature table
df_item
df_brand_item_map
# Join the shop VIP-user feature table onto the item feature table, keyed on seller_id
df_item = df_item.merge(df_brand_vip_users,how = "left", on="seller_id")
df_item
# Fill missing values with 0
df_item["label"] = df_item["label"].fillna(0)
df_item
df_item.isnull().sum()
df_item = df_item.dropna()
df_item = df_item.merge(df_seller_item_count, how = "left", on = "seller_id")
df_item
df_item
# Import the train/test split helper
from sklearn.model_selection import train_test_split
# Feature (independent variable) table
X = df_item.drop(columns=["order", "seller_id","item_id","label", "item_count"])
# Target (dependent variable)
y = df_item["order"]
# Split into training and test sets, with 20% held out for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state=42)
# Inspect the training-set features
X_train
y
X_train
X_train.isnull().sum()
# Import the linear regression model and the MAE metric
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
linear_model = LinearRegression()
linear_model.fit(X_train, y_train)
# Predict the y values for X_test
y_pred = linear_model.predict(X_test)
# Use the score method to see how well the model explains the target (R^2 on the test set)
print("Scores:", linear_model.score(X_test, y_test))
# Mean absolute error between the predicted y and y_test
print("MAE:", mean_absolute_error(y_test, y_pred))
# Inspect the model intercept (b)
print("intercept:", linear_model.intercept_)
# Inspect the model coefficients
print("coef_:", linear_model.coef_)
# Predict for one sample of [click, cart, fav], the three features the model was trained on
print(linear_model.predict([[20, 0, 3]]))
| 0.250913 | 0.173708 |
```
# Import required libraries and dependencies
import pandas as pd
import hvplot.pandas
from path import Path
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# Load the data into a Pandas DataFrame
df_market_data = pd.read_csv(
Path("Resources/crypto_market_data.csv"),
index_col="coin_id")
# Display sample data
df_market_data.head(10)
# Generate summary statistics
df_market_data.describe()
# Plot your data to see what's in your DataFrame
df_market_data.hvplot.line(
width=800,
height=400,
rot=90
)
# Use the `StandardScaler()` module from scikit-learn to normalize the data from the CSV file
scaled_data = StandardScaler().fit_transform(df_market_data)
# Create a DataFrame with the scaled data
df_market_data_scaled = pd.DataFrame(
scaled_data,
columns=df_market_data.columns
)
# Copy the crypto names from the original data
df_market_data_scaled["coin_id"] = df_market_data.index
# Set the coinid column as index
df_market_data_scaled = df_market_data_scaled.set_index("coin_id")
# Display sample data
df_market_data_scaled.head()
```
---
```
# Initialize the K-Means model with four clusters
model = KMeans(n_clusters=4)
# Fit the K-Means model using the scaled data
model.fit(df_market_data_scaled)
# Predict the clusters to group the cryptocurrencies using the scaled data
crypto_clusters_k4 = model.predict(df_market_data_scaled)
# View the resulting array of cluster values.
print(crypto_clusters_k4)
# Add a new column to the DataFrame with the predicted clusters with k=4
df_market_data_scaled["crypto_cluster_k4"] = crypto_clusters_k4
# Display sample data
df_market_data_scaled.head()
# Create a scatter plot using hvPlot by setting
# `x="price_change_percentage_14d"` and `y="price_change_percentage_1y"`.
# Group the results by the clusters using `by="crypto_cluster_k4".
# Set the hover to the coin id using `hover_cols=["coin_id"]`.
df_market_data_scaled.hvplot.scatter(
x="price_change_percentage_14d",
y="price_change_percentage_1y",
by="crypto_cluster_k4",
hover_cols=["coin_id"],
marker=["hex", "square", "cross", "inverted_triangle", "triangle"],
)
```
---
```
# Create a list with the number of k-values to try
# Use a range from 1 to 11
k = list(range(1, 11))
# Create an empty list to store the inertia values
inertia = []
# Create a for loop to compute the inertia with each possible value of k
# Inside the loop:
# 1. Create a KMeans model using the loop counter for the n_clusters
# 2. Fit the model to the data using `df_market_data_scaled`
# 3. Append the model.inertia_ to the inertia list
for i in k:
model = KMeans(n_clusters=i, random_state=0)
model.fit(df_market_data_scaled)
inertia.append(model.inertia_)
# Create a dictionary with the data to plot the Elbow curve
elbow_data = {
"k": k,
"inertia": inertia
}
# Create a DataFrame with the data to plot the Elbow curve
df_elbow = pd.DataFrame(elbow_data)
# Plot a line chart with all the inertia values computed with
# the different values of k to visually identify the optimal value for k.
df_elbow.hvplot(x='k', y='inertia', title='Elbow Curve', xticks=k)
```
**Question:** What is the best value for `k`?
**Answer:** Based on the elbow curve, the best value for k is 5.
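As a quick numeric cross-check of the elbow reading (a minimal sketch that reuses the `df_elbow` DataFrame built above), we can print how much the inertia drops between consecutive values of k; the drop flattens out past the elbow.
```
# Percentage change in inertia from one k to the next
df_elbow["pct_change"] = df_elbow["inertia"].pct_change()
df_elbow
```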
---
```
# Create a PCA model instance and set `n_components=3`.
pca = PCA(n_components=3)
# Use the PCA model with `fit_transform` to reduce to
# three principal components.
market_pca_data = pca.fit_transform(df_market_data_scaled)
# View the first five rows of the DataFrame.
market_pca_data[:5]
# Retrieve the explained variance to determine how much information
# can be attributed to each principal component.
pca.explained_variance_ratio_
```
**Question** What is the total explained variance of the three principal components?
**Answer** About 89%. To find the total explained variance we add up the explained variance ratios of the three principal components: the first accounts for 36.94% of the variance, the second for 29.17%, and the third for 22.89%, which sum to roughly 89%.
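As a sanity check (a small sketch reusing the fitted `pca` object from the cell above), the three ratios can be summed directly:
```
# Total fraction of variance captured by the three principal components
print(pca.explained_variance_ratio_.sum())
```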
```
# Creating a DataFrame with the PCA data
df_market_data_pca = pd.DataFrame(
market_pca_data,
columns=["PC1", "PC2", "PC3"]
)
# Copy the crypto names from the original data
df_market_data_pca["coin_id"] = df_market_data.index
# Set the coinid column as index
df_market_data_pca = df_market_data_pca.set_index("coin_id")
# Display sample data
df_market_data_pca.head()
# Initiate a new K-Means algorithm using the PCA DataFrame to group
# the cryptocurrencies. Set the `n_clusters` parameter equal to
# the best value for `k` found before. View the resulting array.
# Initialize the K-Means model
model = KMeans(n_clusters=5)
# Fit the model
model.fit(df_market_data_pca)
# Predict clusters
crypto_clusters_k5 = model.predict(df_market_data_pca)
# View the resulting array
crypto_clusters_k5
# - From the original DataFrame, add the `price_change_percentage_1y` and `price_change_percentage_14d` columns.
# - Add a column with the predicted cluster values identified using a k value of 4. (The predicted cluster values were calculated in the “Cluster Cryptocurrencies with K-means” section.)
# - Add a column with the predicted cluster values identified using the optimal value for k.
# Add the price_change_percentage_1y column from the original data
df_market_data_pca["price_change_percentage_1y"] = df_market_data["price_change_percentage_1y"]
# Add the price_change_percentage_14d column from the original data
df_market_data_pca["price_change_percentage_14d"] = df_market_data["price_change_percentage_14d"]
# Add a new column to the DataFrame with the predicted clusters using the best value of k
df_market_data_pca["crypto_cluster_k5"] = crypto_clusters_k5
# Add a new column to the DataFrame with the predicted clusters using k=4
df_market_data_pca["crypto_cluster_k4"] = crypto_clusters_k4
# Display sample data
df_market_data_pca.head()
```
---
```
# Create a scatter plot for the Crypto Clusters using k=4 data.
# Use the PCA data to create a scatter plot with hvPlot by setting
# x="price_change_percentage_14d" and y="price_change_percentage_1y".
# Group by the clusters using `by="crypto_cluster_k4".
# Set the hover colors to the coin id with `hover_cols=["coin_id"]
# Create a descriptive title for the plot using the title parameter.
scatter_plot_k4 = df_market_data_pca.hvplot.scatter(
x="price_change_percentage_14d",
y="price_change_percentage_1y",
by="crypto_cluster_k4",
hover_cols=["coin_id"],
title='Clustered k=4 Market Data'
)
scatter_plot_k4
# Create a scatter plot for the Crypto Clusters using k=5 data.
# Use the PCA data to create a scatter plot with hvPlot by setting
# x="price_change_percentage_14d" and y="price_change_percentage_1y".
# Group by the clusters using `by="crypto_cluster_k5".
# Set the hover colors to the coin id with `hover_cols=["coin_id"]
# Create a descriptive title for the plot using the title parameter.
scatter_plot_k5 = df_market_data_pca.hvplot.scatter(
x="price_change_percentage_14d",
y="price_change_percentage_1y",
by="crypto_cluster_k5",
hover_cols=["coin_id"],
title='PCA Clustered k=5 Market Data'
)
scatter_plot_k5
# Compare both scatter plots
scatter_plot_k4 + scatter_plot_k5
```
**Question:** What value of `k` seems to create the most accurate clusters to group cryptocurrencies according to their profitability?
**Answer:** Based on the scatter plots, k = 4 seems to create the most accurate clusters for grouping cryptocurrencies according to their profitability.
|
github_jupyter
|
# Import required libraries and dependencies
import pandas as pd
import hvplot.pandas
from path import Path
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# Load the data into a Pandas DataFrame
df_market_data = pd.read_csv(
Path("Resources/crypto_market_data.csv"),
index_col="coin_id")
# Display sample data
df_market_data.head(10)
# Generate summary statistics
df_market_data.describe()
# Plot your data to see what's in your DataFrame
df_market_data.hvplot.line(
width=800,
height=400,
rot=90
)
# Use the `StandardScaler()` module from scikit-learn to normalize the data from the CSV file
scaled_data = StandardScaler().fit_transform(df_market_data)
# Create a DataFrame with the scaled data
df_market_data_scaled = pd.DataFrame(
scaled_data,
columns=df_market_data.columns
)
# Copy the crypto names from the original data
df_market_data_scaled["coin_id"] = df_market_data.index
# Set the coinid column as index
df_market_data_scaled = df_market_data_scaled.set_index("coin_id")
# Display sample data
df_market_data_scaled.head()
# Initialize the K-Means model with four clusters
model = KMeans(n_clusters=4)
# Fit the K-Means model using the scaled data
model.fit(df_market_data_scaled)
# Predict the clusters to group the cryptocurrencies using the scaled data
crypto_clusters_k4 = model.predict(df_market_data_scaled)
# View the resulting array of cluster values.
print(crypto_clusters_k4)
# Add a new column to the DataFrame with the predicted clusters with k=4
df_market_data_scaled["crypto_cluster_k4"] = crypto_clusters_k4
# Display sample data
df_market_data_scaled.head()
# Create a scatter plot using hvPlot by setting
# `x="price_change_percentage_14d"` and `y="price_change_percentage_1y"`.
# Group the results by the clusters using `by="crypto_cluster_k4".
# Set the hover to the coin id using `hover_cols=["coin_id"]`.
df_market_data_scaled.hvplot.scatter(
x="price_change_percentage_14d",
y="price_change_percentage_1y",
by="crypto_cluster_k4",
hover_cols=["coin_id"],
marker=["hex", "square", "cross", "inverted_triangle", "triangle"],
)
# Create a list with the number of k-values to try
# Use a range from 1 to 11
k = list(range(1, 11))
# Create an empty list to store the inertia values
inertia = []
# Create a for loop to compute the inertia with each possible value of k
# Inside the loop:
# 1. Create a KMeans model using the loop counter for the n_clusters
# 2. Fit the model to the data using `df_market_data_scaled`
# 3. Append the model.inertia_ to the inertia list
for i in k:
model = KMeans(n_clusters=i, random_state=0)
model.fit(df_market_data_scaled)
inertia.append(model.inertia_)
# Create a dictionary with the data to plot the Elbow curve
elbow_data = {
"k": k,
"inertia": inertia
}
# Create a DataFrame with the data to plot the Elbow curve
df_elbow = pd.DataFrame(elbow_data)
# Plot a line chart with all the inertia values computed with
# the different values of k to visually identify the optimal value for k.
df_elbow.hvplot(x='k', y='inertia', title='Elbow Curve', xticks=k)
# Create a PCA model instance and set `n_components=3`.
pca = PCA(n_components=3)
# Use the PCA model with `fit_transform` to reduce to
# three principal components.
market_pca_data = pca.fit_transform(df_market_data_scaled)
# View the first five rows of the DataFrame.
market_pca_data[:5]
# Retrieve the explained variance to determine how much information
# can be attributed to each principal component.
pca.explained_variance_ratio_
# Creating a DataFrame with the PCA data
df_market_data_pca = pd.DataFrame(
market_pca_data,
columns=["PC1", "PC2", "PC3"]
)
# Copy the crypto names from the original data
df_market_data_pca["coin_id"] = df_market_data.index
# Set the coinid column as index
df_market_data_pca = df_market_data_pca.set_index("coin_id")
# Display sample data
df_market_data_pca.head()
# Initiate a new K-Means algorithm using the PCA DataFrame to group
# the cryptocurrencies. Set the `n_clusters` parameter equal to
# the best value for `k` found before. View the resulting array.
# Initialize the K-Means model
model = KMeans(n_clusters=5)
# Fit the model
model.fit(df_market_data_pca)
# Predict clusters
crypto_clusters_k5 = model.predict(df_market_data_pca)
# View the resulting array
crypto_clusters_k5
# - From the original DataFrame, add the `price_change_percentage_1y` and `price_change_percentage_14d` columns.
# - Add a column with the predicted cluster values identified using a k value of 4. (The predicted cluster values were calculated in the “Cluster Cryptocurrencies with K-means” section.)
# - Add a column with the predicted cluster values identified using the optimal value for k.
# Add the price_change_percentage_1y column from the original data
df_market_data_pca["price_change_percentage_1y"] = df_market_data["price_change_percentage_1y"]
# Add the price_change_percentage_14d column from the original data
df_market_data_pca["price_change_percentage_14d"] = df_market_data["price_change_percentage_14d"]
# Add a new column to the DataFrame with the predicted clusters using the best value of k
df_market_data_pca["crypto_cluster_k5"] = crypto_clusters_k5
# Add a new column to the DataFrame with the predicted clusters using k=4
df_market_data_pca["crypto_cluster_k4"] = crypto_clusters_k4
# Display sample data
df_market_data_pca.head()
# Create a scatter plot for the Crypto Clusters using k=4 data.
# Use the PCA data to create a scatter plot with hvPlot by setting
# x="price_change_percentage_14d" and y="price_change_percentage_1y".
# Group by the clusters using `by="crypto_cluster_k4".
# Set the hover colors to the coin id with `hover_cols=["coin_id"]
# Create a descriptive title for the plot using the title parameter.
scatter_plot_k4 = df_market_data_pca.hvplot.scatter(
x="price_change_percentage_14d",
y="price_change_percentage_1y",
by="crypto_cluster_k4",
hover_cols=["coin_id"],
title='Clustered k=4 Market Data'
)
scatter_plot_k4
# Create a scatter plot for the Crypto Clusters using k=5 data.
# Use the PCA data to create a scatter plot with hvPlot by setting
# x="price_change_percentage_14d" and y="price_change_percentage_1y".
# Group by the clusters using `by="crypto_cluster_k5".
# Set the hover colors to the coin id with `hover_cols=["coin_id"]
# Create a descriptive title for the plot using the title parameter.
scatter_plot_k5 = df_market_data_pca.hvplot.scatter(
x="price_change_percentage_14d",
y="price_change_percentage_1y",
by="crypto_cluster_k5",
hover_cols=["coin_id"],
title='PCA Clustered k=5 Market Data'
)
scatter_plot_k5
# Compare both scatter plots
scatter_plot_k4 + scatter_plot_k5
| 0.936759 | 0.906694 |
# P-BMP280 and T-BMP280 measures
by: Widya Meiriska
```
import csv
import pandas as pd
import numpy as np
import json
import matplotlib.pyplot as plt
%matplotlib inline
```
### 1. Read Dataset
#### P-BMP280-measures
```
df = pd.read_csv('../data/raw/measures/P-BMP280-measures.csv')
df.head()
df.tail()
```
#### T-BMP280-measures
```
df1 = pd.read_csv('../data/raw/measures/T-BMP280-measures.csv')
df1.head()
df1.tail()
```
### 2. Data Investigation
#### P-BMP280-measures
```
df.columns
df.count()
df.isnull().sum()
df.dtypes
df.describe()
df.info()
missingdf = pd.DataFrame(df.isna().sum()).rename(columns = {0: 'total'})
missingdf['percent'] = missingdf['total'] / len(df)
missingdf
```
#### T-BMP280-measures
```
df1.columns
df1.count()
df1.isnull().sum()
df1.dtypes
df1.describe()
df1.info()
missingdf1 = pd.DataFrame(df1.isna().sum()).rename(columns = {0: 'total'})
missingdf1['percent'] = missingdf1['total'] / len(df1)
missingdf1
# Convert time column into date time format
df['time'] = pd.to_datetime(df['time'])
df1['time'] = pd.to_datetime(df1['time'])
```
#### There is no missing data, and the P-BMP280 and T-BMP280 sensor measurements share the same format
#### Merge P-BMP280-measures and T-BMP280-measures
P-BMP280 measures pressure and T-BMP280 measures temperature. Here I merge the P-BMP280 and T-BMP280 data, since their measurement timestamps are almost the same.
```
df.rename(columns={'sensor': 'pressure sensor','value' : 'P-BMP280'},inplace=True)
df1.rename(columns={'sensor': 'temperature sensor','value' : 'T-BMP280'},inplace=True)
df.head()
df1.head()
newdf = pd.merge(df, df1, on='time', how='outer')
newdf.head()
newdf.tail()
newdf = newdf.reindex(columns=['time','pressure sensor','P-BMP280','temperature sensor','T-BMP280'])
newdf = newdf[['time','pressure sensor','P-BMP280','temperature sensor','T-BMP280']]
data = newdf.drop(["pressure sensor", "temperature sensor"], axis=1)
data.head()
data.tail()
data.count()
missingdata = pd.DataFrame(data.isna().sum()).rename(columns = {0: 'total'})
missingdata['percent'] = missingdata['total'] / len(data)
missingdata
```
#### After merging the data from the P-BMP280 and T-BMP280 sensors, many missing values appear. This is likely because the measurement times differ slightly, so I will interpolate the missing data.
Fill the NaN values in the pressure and temperature columns by interpolating over time
```
data.set_index('time',inplace=True)
new_df = data.interpolate(method="time")
new_df.tail()
missingnewdf1 = pd.DataFrame(new_df.isna().sum()).rename(columns = {0: 'total'})
missingnewdf1['percent'] = missingnewdf1['total'] / len(new_df)
missingnewdf1
new_df.count()
new_df.describe()
```
#### After interpolating and filling the data, there are no more missing values, and I assume the data is clean
### 3. Data Visualization
```
%matplotlib inline
plt.figure(figsize=(25, 25))
plt.subplot(2,2,1)
new_df['T-BMP280'].plot()
plt.title('Time vs Temperature')
plt.subplot(2,2,2)
new_df['T-BMP280'].resample('D').mean().plot()
plt.title('Time vs Temperature')
%matplotlib inline
plt.figure(figsize=(25, 25))
plt.subplot(2,2,1)
new_df['P-BMP280'].plot()
plt.title('Time vs Pressure')
plt.subplot(2,2,2)
new_df['P-BMP280'].resample('D').mean().plot()
plt.title('Time vs Pressure')
```
#### We can see that there are some days when there is no measurement at all
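To see exactly which days are empty (a minimal sketch reusing the interpolated `new_df` and its datetime index), we can count samples per day and filter for days with zero readings:
```
# Number of raw samples per calendar day; a count of 0 means no measurements that day
daily_counts = new_df['P-BMP280'].resample('D').count()
daily_counts[daily_counts == 0]
```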
|
github_jupyter
|
import csv
import pandas as pd
import numpy as np
import json
import matplotlib.pyplot as plt
%matplotlib inline
df = pd.read_csv('../data/raw/measures/P-BMP280-measures.csv')
df.head()
df.tail()
df1 = pd.read_csv('../data/raw/measures/T-BMP280-measures.csv')
df1.head()
df1.tail()
df.columns
df.count()
df.isnull().sum()
df.dtypes
df.describe()
df.info()
missingdf = pd.DataFrame(df.isna().sum()).rename(columns = {0: 'total'})
missingdf['percent'] = missingdf['total'] / len(df)
missingdf
df1.columns
df1.count()
df1.isnull().sum()
df1.dtypes
df1.describe()
df1.info()
missingdf1 = pd.DataFrame(df1.isna().sum()).rename(columns = {0: 'total'})
missingdf1['percent'] = missingdf1['total'] / len(df1)
missingdf1
# Convert time column into date time format
df['time'] = pd.to_datetime(df['time'])
df1['time'] = pd.to_datetime(df1['time'])
df.rename(columns={'sensor': 'pressure sensor','value' : 'P-BMP280'},inplace=True)
df1.rename(columns={'sensor': 'temperature sensor','value' : 'T-BMP280'},inplace=True)
df.head()
df1.head()
newdf = pd.merge(df, df1, on='time', how='outer')
newdf.head()
newdf.tail()
newdf = newdf.reindex(columns=['time','pressure sensor','P-BMP280','temperature sensor','T-BMP280'])
newdf = newdf[['time','pressure sensor','P-BMP280','temperature sensor','T-BMP280']]
data = newdf.drop(["pressure sensor", "temperature sensor"], axis=1)
data.head()
data.tail()
data.count()
missingdata = pd.DataFrame(data.isna().sum()).rename(columns = {0: 'total'})
missingdata['percent'] = missingdata['total'] / len(data)
missingdata
data.set_index('time',inplace=True)
new_df = data.interpolate(method="time")
new_df.tail()
missingnewdf1 = pd.DataFrame(new_df.isna().sum()).rename(columns = {0: 'total'})
missingnewdf1['percent'] = missingnewdf1['total'] / len(new_df)
missingnewdf1
new_df.count()
new_df.describe()
%matplotlib inline
plt.figure(figsize=(25, 25))
plt.subplot(2,2,1)
new_df['T-BMP280'].plot()
plt.title('Time vs Temperature')
plt.subplot(2,2,2)
new_df['T-BMP280'].resample('D').mean().plot()
plt.title('Time vs Temperature')
%matplotlib inline
plt.figure(figsize=(25, 25))
plt.subplot(2,2,1)
new_df['P-BMP280'].plot()
plt.title('Time vs Pressure')
plt.subplot(2,2,2)
new_df['P-BMP280'].resample('D').mean().plot()
plt.title('Time vs Pressure')
| 0.436382 | 0.90723 |
# Advanced Feature Engineering in Keras
**Learning Objectives**
1. Process temporal feature columns in Keras
2. Use Lambda layers to perform feature engineering on geolocation features
3. Create bucketized and crossed feature columns
## Introduction
In this notebook, we use Keras to build a taxifare price prediction model and utilize feature engineering to improve the fare amount prediction for NYC taxi cab rides.
Each learning objective will correspond to a __#TODO__ in the [student lab notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/feature_engineering/labs/4_keras_adv_feat_eng-lab.ipynb) -- try to complete that notebook first before reviewing this solution notebook.
## Set up environment variables and load necessary libraries
We will start by importing the necessary libraries for this lab.
```
# Run the chown command to change the ownership of the repository
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.1 || pip install --user tensorflow==2.1
```
**Note:** After executing the above cell you will see the output `tensorflow==2.1.0`, which is the installed version of TensorFlow.
You may ignore specific incompatibility errors and warnings.
**Restart the kernel** (click on the reload button above).
```
# You can use any Python source file as a module by executing an import statement in some other Python source file.
# The import statement combines two operations; it searches for the named module, then it binds the results of that search
# to a name in the local scope.
import datetime
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import feature_column as fc
from tensorflow.keras import layers
from tensorflow.keras import models
# set TF error log verbosity
logging.getLogger("tensorflow").setLevel(logging.ERROR)
print(tf.version.VERSION)
```
## Load taxifare dataset
The Taxi Fare dataset for this lab contains 106,545 rows and has been pre-processed and split for use in this lab. Note that this is the same dataset used in the BigQuery feature engineering labs. The fare_amount column is the target, the continuous value we'll train a model to predict.
First, let's download the .csv data by copying the data from a cloud storage bucket.
```
# The `os.makedirs()` method creates any missing directories in the specified path.
if not os.path.isdir("../data"):
os.makedirs("../data")
# The `gsutil cp` command allows you to copy data between the bucket and current directory.
!gsutil cp gs://cloud-training-demos/feat_eng/data/*.csv ../data
```
Let's check that the files were copied correctly and look like we expect them to.
```
# `ls` shows the working directory's contents.
# The `-l` flag lists all files with permissions and details.
!ls -l ../data/*.csv
# By default `head` returns the first ten lines of each file.
!head ../data/*.csv
```
## Create an input pipeline
Typically, you will use a two-step process to build the pipeline. Step 1 is to define the columns of data, i.e., which column we're predicting for, and the default values. Step 2 is to define two functions: a function to define the features and label you want to use, and a function to load the training data. Also, note that pickup_datetime is a string and we will need to handle this in our feature engineered model.
```
CSV_COLUMNS = [
'fare_amount',
'pickup_datetime',
'pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'passenger_count',
'key',
]
LABEL_COLUMN = 'fare_amount'
STRING_COLS = ['pickup_datetime']
NUMERIC_COLS = ['pickup_longitude', 'pickup_latitude',
'dropoff_longitude', 'dropoff_latitude',
'passenger_count']
DEFAULTS = [[0.0], ['na'], [0.0], [0.0], [0.0], [0.0], [0.0], ['na']]
DAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
# A function to define features and labels
def features_and_labels(row_data):
for unwanted_col in ['key']:
row_data.pop(unwanted_col)
label = row_data.pop(LABEL_COLUMN)
return row_data, label
# A utility method to create a tf.data dataset from a Pandas Dataframe
def load_dataset(pattern, batch_size=1, mode='eval'):
dataset = tf.data.experimental.make_csv_dataset(pattern,
batch_size,
CSV_COLUMNS,
DEFAULTS)
dataset = dataset.map(features_and_labels) # features, label
if mode == 'train':
dataset = dataset.shuffle(1000).repeat()
# take advantage of multi-threading; 1=AUTOTUNE
dataset = dataset.prefetch(1)
return dataset
```
## Create a Baseline DNN Model in Keras
Now let's build the Deep Neural Network (DNN) model in Keras using the functional API. Unlike the sequential API, we will need to specify the input and hidden layers. Note that we are creating a linear regression baseline model with no feature engineering. Recall that a baseline model is a solution to a problem without applying any machine learning techniques.
```
# Build a simple Keras DNN using its Functional API
def rmse(y_true, y_pred): # Root mean square error
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
def build_dnn_model():
# input layer
inputs = {
colname: layers.Input(name=colname, shape=(), dtype='float32')
for colname in NUMERIC_COLS
}
# feature_columns
feature_columns = {
colname: fc.numeric_column(colname)
for colname in NUMERIC_COLS
}
# Constructor for DenseFeatures takes a list of numeric columns
dnn_inputs = layers.DenseFeatures(feature_columns.values())(inputs)
    # two hidden layers of [32, 8] just like in the BQML DNN
h1 = layers.Dense(32, activation='relu', name='h1')(dnn_inputs)
h2 = layers.Dense(8, activation='relu', name='h2')(h1)
# final output is a linear activation because this is regression
output = layers.Dense(1, activation='linear', name='fare')(h2)
model = models.Model(inputs, output)
# compile model
model.compile(optimizer='adam', loss='mse', metrics=[rmse, 'mse'])
return model
```
We'll build our DNN model and inspect the model architecture.
```
model = build_dnn_model()
# We can visualize the DNN using the Keras `plot_model` utility.
tf.keras.utils.plot_model(model, 'dnn_model.png', show_shapes=False, rankdir='LR')
```
## Train the model
To train the model, simply call [model.fit()](https://keras.io/models/model/#fit). Note that we should really use many more NUM_TRAIN_EXAMPLES (i.e. a larger dataset). We shouldn't make assumptions about the quality of the model based on training/evaluating it on a small sample of the full data.
We start by setting up the environment variables for training, creating the input pipeline datasets, and then train our baseline DNN model.
```
TRAIN_BATCH_SIZE = 32
NUM_TRAIN_EXAMPLES = 59621 * 5
NUM_EVALS = 5
NUM_EVAL_EXAMPLES = 14906
# `load_dataset` method is used to load the dataset.
trainds = load_dataset('../data/taxi-train*',
TRAIN_BATCH_SIZE,
'train')
evalds = load_dataset('../data/taxi-valid*',
1000,
'eval').take(NUM_EVAL_EXAMPLES//1000)
steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)
# `Fit` trains the model for a fixed number of epochs
history = model.fit(trainds,
validation_data=evalds,
epochs=NUM_EVALS,
steps_per_epoch=steps_per_epoch)
```
### Visualize the model loss curve
Next, we will use matplotlib to draw the model's loss curves for training and validation. A line plot is also created showing the mean squared error loss over the training epochs for both the train (blue) and test (orange) sets.
```
# A function to define plot_curves.
def plot_curves(history, metrics):
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(metrics):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history['val_{}'.format(key)])
plt.title('model {}'.format(key))
plt.ylabel(key)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left');
plot_curves(history, ['loss', 'mse'])
```
### Predict with the model locally
To predict with Keras, you simply call [model.predict()](https://keras.io/models/model/#predict) and pass in the cab ride you want to predict the fare amount for. Next we note the fare price at this geolocation and pickup_datetime.
```
# Use the model to do prediction with `model.predict()`.
model.predict({
'pickup_longitude': tf.convert_to_tensor([-73.982683]),
'pickup_latitude': tf.convert_to_tensor([40.742104]),
'dropoff_longitude': tf.convert_to_tensor([-73.983766]),
'dropoff_latitude': tf.convert_to_tensor([40.755174]),
'passenger_count': tf.convert_to_tensor([3.0]),
'pickup_datetime': tf.convert_to_tensor(['2010-02-08 09:17:00 UTC'], dtype=tf.string),
}, steps=1)
```
## Improve Model Performance Using Feature Engineering
We now improve our model's performance by creating the following feature engineering types: Temporal, Categorical, and Geolocation.
### Temporal Feature Columns
We incorporate the temporal feature pickup_datetime. As noted earlier, pickup_datetime is a string and we will need to handle this within the model. First, you will include the pickup_datetime as a feature and then you will need to modify the model to handle our string feature.
```
# TODO 1a
def parse_datetime(s):
if type(s) is not str:
s = s.numpy().decode('utf-8')
return datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S %Z")
# TODO 1b
def get_dayofweek(s):
ts = parse_datetime(s)
return DAYS[ts.weekday()]
# TODO 1c
@tf.function
def dayofweek(ts_in):
return tf.map_fn(
lambda s: tf.py_function(get_dayofweek, inp=[s], Tout=tf.string),
ts_in)
```
### Geolocation/Coordinate Feature Columns
The pick-up/drop-off longitude and latitude data are crucial to predicting the fare amount as fare amounts in NYC taxis are largely determined by the distance traveled. As such, we need to teach the model the Euclidean distance between the pick-up and drop-off points.
Recall that latitude and longitude allow us to specify any location on Earth using a pair of coordinates. In our training data set, we restricted our data points to pickups and drop-offs within NYC. New York City has an approximate longitude range of -74.05 to -73.75 and a latitude range of 40.63 to 40.85.
#### Computing Euclidean distance
The dataset contains information regarding the pickup and drop off coordinates. However, there is no information regarding the distance between the pickup and drop off points. Therefore, we create a new feature that calculates the distance between each pair of pickup and drop off points. We can do this using the Euclidean Distance, which is the straight-line distance between any two coordinate points.
```
def euclidean(params):
lon1, lat1, lon2, lat2 = params
londiff = lon2 - lon1
latdiff = lat2 - lat1
return tf.sqrt(londiff*londiff + latdiff*latdiff)
```
#### Scaling latitude and longitude
It is very important for numerical variables to get scaled before they are "fed" into the neural network. Here we use min-max scaling (also called normalization) on the geolocation features. Later in our model, you will see that these values are shifted and rescaled so that they end up ranging from 0 to 1.
First, we create a function named 'scale_longitude', which takes the longitude values and adds 78 to each. Our longitudes range from roughly -78 to -70, so -78 is the minimum longitude and the range spans 8 degrees. Adding 78 to each longitude and then dividing by 8 returns a value scaled to the range 0 to 1 (for example, -74.0 maps to (-74.0 + 78)/8 = 0.5).
```
def scale_longitude(lon_column):
return (lon_column + 78)/8.
```
Next, we create a function named 'scale_latitude', which takes the latitude values and subtracts 37 from each. Our latitudes range from roughly 37 to 45, so 37 is the minimum latitude and the range spans 8 degrees. Subtracting 37 from each latitude and then dividing by 8 returns a value scaled to the range 0 to 1 (for example, 41.0 maps to (41.0 - 37)/8 = 0.5).
```
def scale_latitude(lat_column):
return (lat_column - 37)/8.
```
### Putting it all together
We now pull the geolocation features together for our model. We reuse the "euclidean" function defined above to compute the distance feature, and we create a function called "transform". The transform function passes our numerical and string column features as inputs to the model, scales the geolocation features, creates the Euclidean distance as a transformed variable, and lastly bucketizes the latitude and longitude features.
```
def transform(inputs, numeric_cols, string_cols, nbuckets):
print("Inputs before features transformation: {}".format(inputs.keys()))
# Pass-through columns
transformed = inputs.copy()
del transformed['pickup_datetime']
feature_columns = {
colname: tf.feature_column.numeric_column(colname)
for colname in numeric_cols
}
# TODO 2a
    # Scaling longitude from range [-78, -70] to [0, 1]
for lon_col in ['pickup_longitude', 'dropoff_longitude']:
transformed[lon_col] = layers.Lambda(
scale_longitude,
name="scale_{}".format(lon_col))(inputs[lon_col])
# TODO 2b
# Scaling latitude from range [37, 45] to [0, 1]
for lat_col in ['pickup_latitude', 'dropoff_latitude']:
transformed[lat_col] = layers.Lambda(
scale_latitude,
name='scale_{}'.format(lat_col))(inputs[lat_col])
# add Euclidean distance
transformed['euclidean'] = layers.Lambda(
euclidean,
name='euclidean')([inputs['pickup_longitude'],
inputs['pickup_latitude'],
inputs['dropoff_longitude'],
inputs['dropoff_latitude']])
feature_columns['euclidean'] = fc.numeric_column('euclidean')
# TODO 3a
# create bucketized features
latbuckets = np.linspace(0, 1, nbuckets).tolist()
lonbuckets = np.linspace(0, 1, nbuckets).tolist()
b_plat = fc.bucketized_column(
feature_columns['pickup_latitude'], latbuckets)
b_dlat = fc.bucketized_column(
feature_columns['dropoff_latitude'], latbuckets)
b_plon = fc.bucketized_column(
feature_columns['pickup_longitude'], lonbuckets)
b_dlon = fc.bucketized_column(
feature_columns['dropoff_longitude'], lonbuckets)
# TODO 3b
# create crossed columns
ploc = fc.crossed_column([b_plat, b_plon], nbuckets * nbuckets)
dloc = fc.crossed_column([b_dlat, b_dlon], nbuckets * nbuckets)
pd_pair = fc.crossed_column([ploc, dloc], nbuckets ** 4)
# create embedding columns
feature_columns['pickup_and_dropoff'] = fc.embedding_column(pd_pair, 100)
print("Transformed features: {}".format(transformed.keys()))
print("Feature columns: {}".format(feature_columns.keys()))
return transformed, feature_columns
```
Next, we'll create our DNN model now with the engineered features. We'll set `NBUCKETS = 10` to specify 10 buckets when bucketizing the latitude and longitude.
```
NBUCKETS = 10
# DNN MODEL
def rmse(y_true, y_pred):
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
def build_dnn_model():
# input layer is all float except for pickup_datetime which is a string
inputs = {
colname: layers.Input(name=colname, shape=(), dtype='float32')
for colname in NUMERIC_COLS
}
inputs.update({
colname: tf.keras.layers.Input(name=colname, shape=(), dtype='string')
for colname in STRING_COLS
})
# transforms
transformed, feature_columns = transform(inputs,
numeric_cols=NUMERIC_COLS,
string_cols=STRING_COLS,
nbuckets=NBUCKETS)
dnn_inputs = layers.DenseFeatures(feature_columns.values())(transformed)
    # two hidden layers of [32, 8] just like in the BQML DNN
h1 = layers.Dense(32, activation='relu', name='h1')(dnn_inputs)
h2 = layers.Dense(8, activation='relu', name='h2')(h1)
# final output is a linear activation because this is regression
output = layers.Dense(1, activation='linear', name='fare')(h2)
model = models.Model(inputs, output)
# Compile model
model.compile(optimizer='adam', loss='mse', metrics=[rmse, 'mse'])
return model
model = build_dnn_model()
```
Let's see how our model architecture has changed now.
```
# We can visualize the DNN using the Keras `plot_model` utility.
tf.keras.utils.plot_model(model, 'dnn_model_engineered.png', show_shapes=False, rankdir='LR')
# `load_dataset` method is used to load the dataset.
trainds = load_dataset('../data/taxi-train*',
TRAIN_BATCH_SIZE,
'train')
evalds = load_dataset('../data/taxi-valid*',
1000,
'eval').take(NUM_EVAL_EXAMPLES//1000)
steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)
# `Fit` trains the model for a fixed number of epochs
history = model.fit(trainds,
validation_data=evalds,
epochs=NUM_EVALS+3,
steps_per_epoch=steps_per_epoch)
```
As before, let's visualize the model's training and validation loss curves.
```
plot_curves(history, ['loss', 'mse'])
```
Let's make a prediction with this new model with engineered features, using the same example as above.
```
# Use the model to do prediction with `model.predict()`.
model.predict({
'pickup_longitude': tf.convert_to_tensor([-73.982683]),
'pickup_latitude': tf.convert_to_tensor([40.742104]),
'dropoff_longitude': tf.convert_to_tensor([-73.983766]),
'dropoff_latitude': tf.convert_to_tensor([40.755174]),
'passenger_count': tf.convert_to_tensor([3.0]),
'pickup_datetime': tf.convert_to_tensor(['2010-02-08 09:17:00 UTC'], dtype=tf.string),
}, steps=1)
```
Below we summarize our training results comparing our baseline model with our model with engineered features.
| Model | Taxi Fare | Description |
|--------------------|-----------|-------------------------------------------|
| Baseline | 12.29 | Baseline model - no feature engineering |
| Feature Engineered | 07.28 | Feature Engineered Model |
Copyright 2020 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
github_jupyter
|
# Run the chown command to change the ownership of the repository
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.1 || pip install --user tensorflow==2.1
# You can use any Python source file as a module by executing an import statement in some other Python source file.
# The import statement combines two operations; it searches for the named module, then it binds the results of that search
# to a name in the local scope.
import datetime
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import feature_column as fc
from tensorflow.keras import layers
from tensorflow.keras import models
# set TF error log verbosity
logging.getLogger("tensorflow").setLevel(logging.ERROR)
print(tf.version.VERSION)
# The `os.makedirs()` method creates any missing directories in the specified path.
if not os.path.isdir("../data"):
os.makedirs("../data")
# The `gsutil cp` command allows you to copy data between the bucket and current directory.
!gsutil cp gs://cloud-training-demos/feat_eng/data/*.csv ../data
# `ls` shows the working directory's contents.
# The `-l` flag lists all files with permissions and details.
!ls -l ../data/*.csv
# By default `head` returns the first ten lines of each file.
!head ../data/*.csv
CSV_COLUMNS = [
'fare_amount',
'pickup_datetime',
'pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'passenger_count',
'key',
]
LABEL_COLUMN = 'fare_amount'
STRING_COLS = ['pickup_datetime']
NUMERIC_COLS = ['pickup_longitude', 'pickup_latitude',
'dropoff_longitude', 'dropoff_latitude',
'passenger_count']
DEFAULTS = [[0.0], ['na'], [0.0], [0.0], [0.0], [0.0], [0.0], ['na']]
DAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
# A function to define features and labels
def features_and_labels(row_data):
for unwanted_col in ['key']:
row_data.pop(unwanted_col)
label = row_data.pop(LABEL_COLUMN)
return row_data, label
# A utility method to create a tf.data dataset from a Pandas Dataframe
def load_dataset(pattern, batch_size=1, mode='eval'):
dataset = tf.data.experimental.make_csv_dataset(pattern,
batch_size,
CSV_COLUMNS,
DEFAULTS)
dataset = dataset.map(features_and_labels) # features, label
if mode == 'train':
dataset = dataset.shuffle(1000).repeat()
# take advantage of multi-threading; 1=AUTOTUNE
dataset = dataset.prefetch(1)
return dataset
# Build a simple Keras DNN using its Functional API
def rmse(y_true, y_pred): # Root mean square error
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
def build_dnn_model():
# input layer
inputs = {
colname: layers.Input(name=colname, shape=(), dtype='float32')
for colname in NUMERIC_COLS
}
# feature_columns
feature_columns = {
colname: fc.numeric_column(colname)
for colname in NUMERIC_COLS
}
# Constructor for DenseFeatures takes a list of numeric columns
dnn_inputs = layers.DenseFeatures(feature_columns.values())(inputs)
    # two hidden layers of [32, 8] just like in the BQML DNN
h1 = layers.Dense(32, activation='relu', name='h1')(dnn_inputs)
h2 = layers.Dense(8, activation='relu', name='h2')(h1)
# final output is a linear activation because this is regression
output = layers.Dense(1, activation='linear', name='fare')(h2)
model = models.Model(inputs, output)
# compile model
model.compile(optimizer='adam', loss='mse', metrics=[rmse, 'mse'])
return model
model = build_dnn_model()
# We can visualize the DNN using the Keras `plot_model` utility.
tf.keras.utils.plot_model(model, 'dnn_model.png', show_shapes=False, rankdir='LR')
TRAIN_BATCH_SIZE = 32
NUM_TRAIN_EXAMPLES = 59621 * 5
NUM_EVALS = 5
NUM_EVAL_EXAMPLES = 14906
# `load_dataset` method is used to load the dataset.
trainds = load_dataset('../data/taxi-train*',
TRAIN_BATCH_SIZE,
'train')
evalds = load_dataset('../data/taxi-valid*',
1000,
'eval').take(NUM_EVAL_EXAMPLES//1000)
steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)
# `Fit` trains the model for a fixed number of epochs
history = model.fit(trainds,
validation_data=evalds,
epochs=NUM_EVALS,
steps_per_epoch=steps_per_epoch)
# A function to define plot_curves.
def plot_curves(history, metrics):
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(metrics):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history['val_{}'.format(key)])
plt.title('model {}'.format(key))
plt.ylabel(key)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left');
plot_curves(history, ['loss', 'mse'])
# Use the model to do prediction with `model.predict()`.
model.predict({
'pickup_longitude': tf.convert_to_tensor([-73.982683]),
'pickup_latitude': tf.convert_to_tensor([40.742104]),
'dropoff_longitude': tf.convert_to_tensor([-73.983766]),
'dropoff_latitude': tf.convert_to_tensor([40.755174]),
'passenger_count': tf.convert_to_tensor([3.0]),
'pickup_datetime': tf.convert_to_tensor(['2010-02-08 09:17:00 UTC'], dtype=tf.string),
}, steps=1)
# TODO 1a
def parse_datetime(s):
if type(s) is not str:
s = s.numpy().decode('utf-8')
return datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S %Z")
# TODO 1b
def get_dayofweek(s):
ts = parse_datetime(s)
return DAYS[ts.weekday()]
# TODO 1c
@tf.function
def dayofweek(ts_in):
return tf.map_fn(
lambda s: tf.py_function(get_dayofweek, inp=[s], Tout=tf.string),
ts_in)
def euclidean(params):
lon1, lat1, lon2, lat2 = params
londiff = lon2 - lon1
latdiff = lat2 - lat1
return tf.sqrt(londiff*londiff + latdiff*latdiff)
def scale_longitude(lon_column):
return (lon_column + 78)/8.
def scale_latitude(lat_column):
return (lat_column - 37)/8.
def transform(inputs, numeric_cols, string_cols, nbuckets):
print("Inputs before features transformation: {}".format(inputs.keys()))
# Pass-through columns
transformed = inputs.copy()
del transformed['pickup_datetime']
feature_columns = {
colname: tf.feature_column.numeric_column(colname)
for colname in numeric_cols
}
# TODO 2a
    # Scaling longitude from range [-78, -70] to [0, 1]
for lon_col in ['pickup_longitude', 'dropoff_longitude']:
transformed[lon_col] = layers.Lambda(
scale_longitude,
name="scale_{}".format(lon_col))(inputs[lon_col])
# TODO 2b
# Scaling latitude from range [37, 45] to [0, 1]
for lat_col in ['pickup_latitude', 'dropoff_latitude']:
transformed[lat_col] = layers.Lambda(
scale_latitude,
name='scale_{}'.format(lat_col))(inputs[lat_col])
# add Euclidean distance
transformed['euclidean'] = layers.Lambda(
euclidean,
name='euclidean')([inputs['pickup_longitude'],
inputs['pickup_latitude'],
inputs['dropoff_longitude'],
inputs['dropoff_latitude']])
feature_columns['euclidean'] = fc.numeric_column('euclidean')
# TODO 3a
# create bucketized features
latbuckets = np.linspace(0, 1, nbuckets).tolist()
lonbuckets = np.linspace(0, 1, nbuckets).tolist()
b_plat = fc.bucketized_column(
feature_columns['pickup_latitude'], latbuckets)
b_dlat = fc.bucketized_column(
feature_columns['dropoff_latitude'], latbuckets)
b_plon = fc.bucketized_column(
feature_columns['pickup_longitude'], lonbuckets)
b_dlon = fc.bucketized_column(
feature_columns['dropoff_longitude'], lonbuckets)
# TODO 3b
# create crossed columns
ploc = fc.crossed_column([b_plat, b_plon], nbuckets * nbuckets)
dloc = fc.crossed_column([b_dlat, b_dlon], nbuckets * nbuckets)
pd_pair = fc.crossed_column([ploc, dloc], nbuckets ** 4)
# create embedding columns
feature_columns['pickup_and_dropoff'] = fc.embedding_column(pd_pair, 100)
print("Transformed features: {}".format(transformed.keys()))
print("Feature columns: {}".format(feature_columns.keys()))
return transformed, feature_columns
NBUCKETS = 10
# DNN MODEL
def rmse(y_true, y_pred):
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
def build_dnn_model():
# input layer is all float except for pickup_datetime which is a string
inputs = {
colname: layers.Input(name=colname, shape=(), dtype='float32')
for colname in NUMERIC_COLS
}
inputs.update({
colname: tf.keras.layers.Input(name=colname, shape=(), dtype='string')
for colname in STRING_COLS
})
# transforms
transformed, feature_columns = transform(inputs,
numeric_cols=NUMERIC_COLS,
string_cols=STRING_COLS,
nbuckets=NBUCKETS)
dnn_inputs = layers.DenseFeatures(feature_columns.values())(transformed)
    # two hidden layers of [32, 8] just like in the BQML DNN
h1 = layers.Dense(32, activation='relu', name='h1')(dnn_inputs)
h2 = layers.Dense(8, activation='relu', name='h2')(h1)
# final output is a linear activation because this is regression
output = layers.Dense(1, activation='linear', name='fare')(h2)
model = models.Model(inputs, output)
# Compile model
model.compile(optimizer='adam', loss='mse', metrics=[rmse, 'mse'])
return model
model = build_dnn_model()
# We can visualize the DNN using the Keras `plot_model` utility.
tf.keras.utils.plot_model(model, 'dnn_model_engineered.png', show_shapes=False, rankdir='LR')
# `load_dataset` method is used to load the dataset.
trainds = load_dataset('../data/taxi-train*',
TRAIN_BATCH_SIZE,
'train')
evalds = load_dataset('../data/taxi-valid*',
1000,
'eval').take(NUM_EVAL_EXAMPLES//1000)
steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)
# `Fit` trains the model for a fixed number of epochs
history = model.fit(trainds,
validation_data=evalds,
epochs=NUM_EVALS+3,
steps_per_epoch=steps_per_epoch)
plot_curves(history, ['loss', 'mse'])
# Use the model to do prediction with `model.predict()`.
model.predict({
'pickup_longitude': tf.convert_to_tensor([-73.982683]),
'pickup_latitude': tf.convert_to_tensor([40.742104]),
'dropoff_longitude': tf.convert_to_tensor([-73.983766]),
'dropoff_latitude': tf.convert_to_tensor([40.755174]),
'passenger_count': tf.convert_to_tensor([3.0]),
'pickup_datetime': tf.convert_to_tensor(['2010-02-08 09:17:00 UTC'], dtype=tf.string),
}, steps=1)
| 0.672869 | 0.985594 |
# Notebook 1 - Basic Exploration & Logistic Regression Baseline
```
%matplotlib inline
import os
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from scipy.sparse import hstack
import spacy
```
## Data Ingestion
```
BASEDIR = '/data/datasets/kaggle/jigsaw-toxic-comment-classification-challenge'
```
Let's first inspect the training set and gather basic metrics
```
train = pd.read_csv(os.path.join(BASEDIR, 'train.csv'))
train.head()
lens = train.comment_text.str.len()
lens.mean(), lens.std(), lens.max()
lens.hist();
test = pd.read_csv(os.path.join(BASEDIR, 'test.csv'))
test.head()
train['comment_text'] = train['comment_text'].fillna(' ')
test['comment_text'] = test['comment_text'].fillna(' ')
submission = pd.read_csv(os.path.join(BASEDIR, 'sample_submission.csv'))
submission.head()
```
## Basic analysis
This is a multilabel classification task, so let's check the proportion of each label:
```
for label in ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']:
print(label, (train[label] == 1.0).sum() / len(train))
train[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']].corr()
token_counts = CountVectorizer(
strip_accents='unicode',
analyzer='word',
lowercase=True,
ngram_range=(1,1),
token_pattern=r'\w{2,}',
max_features=30000)
token_counts.fit(train['comment_text'])
X = token_counts.fit_transform(train['comment_text'])
token_freq = X.sum(axis=0).tolist()[0]
token_freq[:5]
token_counts_list = [(k, token_freq[v]) for k, v in token_counts.vocabulary_.items()]
token_counts_list = sorted(token_counts_list, key=lambda x: x[1], reverse=True)
token_counts_list[:25]
token_counts_list[-25:]
```
## Text Preprocessing
```
sample_train = train[:100]
nlp = spacy.load('en', disable=['parser', 'ner', 'textcat'])
def reduce_to_double_max(text):
    """Removes unnecessary doubling/tripling/etc. of characters
Steps:
1. Replaces every 3+ consecutive identical chars by 2 consecutive identical chars
2. Replaces every 2+ consecutive non-word character by a single
"""
import re
text = re.sub(r'(\w)\1{2,}', r'\1\1', text)
return re.sub(r'(\W)\1+', r'\1', text)
def preprocess_corpus(corpus):
"""Applies all preprocessing rules to the corpus"""
corpus = (reduce_to_double_max(s.lower()) for s in corpus)
docs = nlp.pipe(corpus, batch_size=1000, n_threads=12)
return [' '.join([x.lemma_ for x in doc if x.is_alpha]) for doc in docs]
sample_train['comment_text_processed'] = preprocess_corpus(sample_train['comment_text'])
sample_train.head()
fname_train_processed = '../data/processed/train.txt'
if os.path.isfile(fname_train_processed):
with open(fname_train_processed, 'r') as fin:
train_processed = [line.strip() for line in fin if line]
else:
train_processed = preprocess_corpus(train['comment_text'])
with open(fname_train_processed, 'w') as fout:
for doc in train_processed:
fout.write('{}\n'.format(doc))
train['comment_text_processed'] = train_processed
fname_test_processed = '../data/processed/test.txt'
if os.path.isfile(fname_test_processed):
with open(fname_test_processed, 'r') as fin:
test_processed = [line.strip() for line in fin if line]
else:
test_processed = preprocess_corpus(test['comment_text'])
with open(fname_test_processed, 'w') as fout:
for doc in test_processed:
fout.write('{}\n'.format(doc))
test['comment_text_processed'] = test_processed
```
## Train & Validation
```
class_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
all_text = pd.concat([train['comment_text_processed'], test['comment_text_processed']])
word_vect = TfidfVectorizer(
sublinear_tf=True,
strip_accents='unicode',
analyzer='word',
token_pattern=r'\w{2,}',
ngram_range=(1,2),
max_features=100000,
binary=True)
word_vect.fit(all_text)
train_word_features = word_vect.transform(train['comment_text_processed'])
test_word_features = word_vect.transform(test['comment_text_processed'])
char_vect = TfidfVectorizer(
sublinear_tf=True,
strip_accents='unicode',
analyzer='char',
ngram_range=(1,4),
max_features=50000)
char_vect.fit(all_text)
train_char_features = char_vect.transform(train['comment_text_processed'])
test_char_features = char_vect.transform(test['comment_text_processed'])
train_features = hstack((train_char_features, train_word_features))
test_features = hstack((test_char_features, test_word_features))
def evaluate_model(model, y_true, train_ft):
cv_loss = np.mean(cross_val_score(model, train_ft, y_true, cv=3, n_jobs=4, scoring='neg_log_loss'))
return cv_loss
losses = []
preds = {'id': test['id']}
for class_name in class_names:
targets = train[class_name]
model = LogisticRegression(C=4.5, solver='sag')
loss = evaluate_model(model, targets, train_features)
print('Avg. CV loss for class {}: {}'.format(class_name, loss))
losses.append(loss)
model.fit(train_features, targets)
preds[class_name] = model.predict_proba(test_features)[:, 1]
print('Cumulative Avg. CV loss: {}'.format(np.mean(losses)))
```
## Submission
```
submission = pd.DataFrame.from_dict(preds)
import time
submission.to_csv('../data/external/submission-{}.csv'.format(time.strftime('%Y%m%d_%H%M', time.localtime())), index=False)
```
|
github_jupyter
|
%matplotlib inline
import os
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from scipy.sparse import hstack
import spacy
BASEDIR = '/data/datasets/kaggle/jigsaw-toxic-comment-classification-challenge'
train = pd.read_csv(os.path.join(BASEDIR, 'train.csv'))
train.head()
lens = train.comment_text.str.len()
lens.mean(), lens.std(), lens.max()
lens.hist();
test = pd.read_csv(os.path.join(BASEDIR, 'test.csv'))
test.head()
train['comment_text'] = train['comment_text'].fillna(' ')
test['comment_text'] = test['comment_text'].fillna(' ')
submission = pd.read_csv(os.path.join(BASEDIR, 'sample_submission.csv'))
submission.head()
for label in ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']:
print(label, (train[label] == 1.0).sum() / len(train))
train[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']].corr()
token_counts = CountVectorizer(
strip_accents='unicode',
analyzer='word',
lowercase=True,
ngram_range=(1,1),
token_pattern=r'\w{2,}',
max_features=30000)
token_counts.fit(train['comment_text'])
X = token_counts.fit_transform(train['comment_text'])
token_freq = X.sum(axis=0).tolist()[0]
token_freq[:5]
token_counts_list = [(k, token_freq[v]) for k, v in token_counts.vocabulary_.items()]
token_counts_list = sorted(token_counts_list, key=lambda x: x[1], reverse=True)
token_counts_list[:25]
token_counts_list[-25:]
sample_train = train[:100]
nlp = spacy.load('en', disable=['parser', 'ner', 'textcat'])
def reduce_to_double_max(text):
"""Removes unecessary doubling/tripling/etc of characters
Steps:
1. Replaces every 3+ consecutive identical chars by 2 consecutive identical chars
2. Replaces every 2+ consecutive non-word character by a single
"""
import re
text = re.sub(r'(\w)\1{2,}', r'\1\1', text)
return re.sub(r'(\W)\1+', r'\1', text)
def preprocess_corpus(corpus):
"""Applies all preprocessing rules to the corpus"""
corpus = (reduce_to_double_max(s.lower()) for s in corpus)
docs = nlp.pipe(corpus, batch_size=1000, n_threads=12)
return [' '.join([x.lemma_ for x in doc if x.is_alpha]) for doc in docs]
sample_train['comment_text_processed'] = preprocess_corpus(sample_train['comment_text'])
sample_train.head()
fname_train_processed = '../data/processed/train.txt'
if os.path.isfile(fname_train_processed):
with open(fname_train_processed, 'r') as fin:
train_processed = [line.strip() for line in fin if line]
else:
train_processed = preprocess_corpus(train['comment_text'])
with open(fname_train_processed, 'w') as fout:
for doc in train_processed:
fout.write('{}\n'.format(doc))
train['comment_text_processed'] = train_processed
fname_test_processed = '../data/processed/test.txt'
if os.path.isfile(fname_test_processed):
with open(fname_test_processed, 'r') as fin:
test_processed = [line.strip() for line in fin if line]
else:
test_processed = preprocess_corpus(test['comment_text'])
with open(fname_test_processed, 'w') as fout:
for doc in test_processed:
fout.write('{}\n'.format(doc))
test['comment_text_processed'] = test_processed
class_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
all_text = pd.concat([train['comment_text_processed'], test['comment_text_processed']])
word_vect = TfidfVectorizer(
sublinear_tf=True,
strip_accents='unicode',
analyzer='word',
token_pattern=r'\w{2,}',
ngram_range=(1,2),
max_features=100000,
binary=True)
word_vect.fit(all_text)
train_word_features = word_vect.transform(train['comment_text_processed'])
test_word_features = word_vect.transform(test['comment_text_processed'])
char_vect = TfidfVectorizer(
sublinear_tf=True,
strip_accents='unicode',
analyzer='char',
ngram_range=(1,4),
max_features=50000)
char_vect.fit(all_text)
train_char_features = char_vect.transform(train['comment_text_processed'])
test_char_features = char_vect.transform(test['comment_text_processed'])
train_features = hstack((train_char_features, train_word_features))
test_features = hstack((test_char_features, test_word_features))
def evaluate_model(model, y_true, train_ft):
cv_loss = np.mean(cross_val_score(model, train_ft, y_true, cv=3, n_jobs=4, scoring='neg_log_loss'))
return cv_loss
losses = []
preds = {'id': test['id']}
for class_name in class_names:
targets = train[class_name]
model = LogisticRegression(C=4.5, solver='sag')
loss = evaluate_model(model, targets, train_features)
print('Avg. CV loss for class {}: {}'.format(class_name, loss))
losses.append(loss)
model.fit(train_features, targets)
preds[class_name] = model.predict_proba(test_features)[:, 1]
print('Cumulative Avg. CV loss: {}'.format(np.mean(losses)))
submission = pd.DataFrame.from_dict(preds)
import time
submission.to_csv('../data/external/submission-{}.csv'.format(time.strftime('%Y%m%d_%H%M', time.localtime())), index=False)
| 0.467818 | 0.775987 |
# Exercise 2
You saw how the simple int type in Python can be used to represent a bit string.
Write an ergonomic wrapper around int that can be used generically as a
sequence of bits (make it iterable and implement `__getitem__()` ). Reimplement
CompressedGene, using the wrapper.
```
from sys import getsizeof
class BitString:
"""
Provides an indexable and iterable BitString with configurable width
"""
def __init__(self, width:int=1)->None:
self.bit_string = 0
self.width = width
self.item_max = (1<<self.width)-1
self.length = 0
def __getitem__(self, i:int)->int:
if i>=self.length:
raise IndexError('Out of bounds')
return self.bit_string>>(i*self.width) & self.item_max
def __setitem__(self, i:int, val:int)->None:
if val>self.item_max:
raise Exception(f"Value {val} has more than {self.width} bits.")
if i+1>self.length:
self.length = i+1
self.bit_string = self.bit_string | val<<(i*self.width)
def __str__(self)->str:
return f"BitString with width {self.width}: {[x for x in self]}"
def __len__(self)->int:
return self.length
def __sizeof__(self)->int:
return getsizeof(self.bit_string)
bs = BitString(width=2)
bs[1] = 1
bs[4] = 3
str(bs)
len(bs)
getsizeof(bs)
for i in bs:
print(f"{i}")
bs[2]=4
class CompressedGeneBS:
def __init__(self, gene: str) -> None:
self._compress(gene)
def _compress(self, gene: str) -> None:
self.bit_string = BitString(width=2)
i = 0
for nucleotide in gene.upper():
if nucleotide == "A": # change last two bits to 00
self.bit_string[i] = 0b00
elif nucleotide == "C": # change last two bits to 01
self.bit_string[i] = 0b01
elif nucleotide == "G": # change last two bits to 10
self.bit_string[i] = 0b10
elif nucleotide == "T": # change last two bits to 11
self.bit_string[i] = 0b11
else:
raise ValueError("Invalid Nucleotide:{}".format(nucleotide))
i += 1
def decompress(self) -> str:
gene: str = ""
for bits in self.bit_string:
if bits == 0b00: # A
gene += "A"
elif bits == 0b01: # C
gene += "C"
elif bits == 0b10: # G
gene += "G"
elif bits == 0b11: # T
gene += "T"
else:
raise ValueError("Invalid bits:{}".format(bits))
return gene
def __str__(self) -> str: # string representation for pretty printing
return self.decompress()
from sys import getsizeof
original: str = "TAGGGATTAACCGTTATATATATATAGCCATGGATCGATTATATAGGGATTAACCGTTATATATATATAGCCATGGATCGATTATA" * 100
print("original is {} bytes".format(getsizeof(original)))
compressed: CompressedGeneBS = CompressedGeneBS(original) # compress
print("compressed is {} bytes".format(getsizeof(compressed.bit_string)))
print(compressed) # decompress
print("original and decompressed are the same: {}".format(original == compressed.decompress()))
```
|
github_jupyter
|
from sys import getsizeof
class BitString:
"""
Provides an indexable and iterable BitString with configurable width
"""
def __init__(self, width:int=1)->None:
self.bit_string = 0
self.width = width
self.item_max = (1<<self.width)-1
self.length = 0
def __getitem__(self, i:int)->int:
if i>=self.length:
raise IndexError('Out of bounds')
return self.bit_string>>(i*self.width) & self.item_max
def __setitem__(self, i:int, val:int)->None:
if val>self.item_max:
raise Exception(f"Value {val} has more than {self.width} bits.")
if i+1>self.length:
self.length = i+1
self.bit_string = self.bit_string | val<<(i*self.width)
def __str__(self)->str:
return f"BitString with width {self.width}: {[x for x in self]}"
def __len__(self)->int:
return self.length
def __sizeof__(self)->int:
return getsizeof(self.bit_string)
bs = BitString(width=2)
bs[1] = 1
bs[4] = 3
str(bs)
len(bs)
getsizeof(bs)
for i in bs:
print(f"{i}")
bs[2]=4
class CompressedGeneBS:
def __init__(self, gene: str) -> None:
self._compress(gene)
def _compress(self, gene: str) -> None:
self.bit_string = BitString(width=2)
i = 0
for nucleotide in gene.upper():
if nucleotide == "A": # change last two bits to 00
self.bit_string[i] = 0b00
elif nucleotide == "C": # change last two bits to 01
self.bit_string[i] = 0b01
elif nucleotide == "G": # change last two bits to 10
self.bit_string[i] = 0b10
elif nucleotide == "T": # change last two bits to 11
self.bit_string[i] = 0b11
else:
raise ValueError("Invalid Nucleotide:{}".format(nucleotide))
i += 1
def decompress(self) -> str:
gene: str = ""
for bits in self.bit_string:
if bits == 0b00: # A
gene += "A"
elif bits == 0b01: # C
gene += "C"
elif bits == 0b10: # G
gene += "G"
elif bits == 0b11: # T
gene += "T"
else:
raise ValueError("Invalid bits:{}".format(bits))
return gene
def __str__(self) -> str: # string representation for pretty printing
return self.decompress()
from sys import getsizeof
original: str = "TAGGGATTAACCGTTATATATATATAGCCATGGATCGATTATATAGGGATTAACCGTTATATATATATAGCCATGGATCGATTATA" * 100
print("original is {} bytes".format(getsizeof(original)))
compressed: CompressedGeneBS = CompressedGeneBS(original) # compress
print("compressed is {} bytes".format(getsizeof(compressed.bit_string)))
print(compressed) # decompress
print("original and decompressed are the same: {}".format(original == compressed.decompress()))
| 0.262842 | 0.842992 |
## Import libs, set paths and load params
```
import os, glob
import numpy as np
import pandas as pd
import sys
sys.path.insert(0, "../src")
import auxilary_functions as f
import subprocess
import csv
import matplotlib.pyplot as plt
cfg_file = "../src/config-ecoli.json"
cfg = f.get_actual_parametrization("../src/config-ecoli.json")
networks = ['fflatt']
organisms = ['ecoli']
sizes = ['500', '750']
n_trials = 10
cascades=['1']
p2=['0.5','0.7','0.9'] #0.2, 0.5, 0.8 (and 0.3?)
p4=['0.5','0.7','0.9'] #0.2, 0.5, 0.8 (and 0.3?)
os.chdir('../networks/')
fflattdir = '../snippets/'
topology_dir = os.path.join(os.getcwd(), 'fflatt_motif_depletion')
#collect data
for size in sizes:
for cascade in cascades:
for network in p2:
for organism in p4:
current_dir = os.path.join(topology_dir, size, cascade, network, organism)
if not os.path.exists(os.path.abspath(current_dir)):
print('making dirs...')
os.makedirs(os.path.abspath(current_dir), exist_ok=True)
print('running fflatt...')
subprocess.call(['python3', fflattdir+'parameter_space_exploration.py',\
cfg_file, size, str(n_trials), current_dir, network, organism, cascade])
```
## Display and save z-scores
```
for size in sizes:
for cascade in cascades:
for network in p2:
for organism in p4:
current_dir = os.path.join(topology_dir, size, cascade, network, organism)
for rep, file in enumerate(glob.glob(os.path.join(current_dir, '*sv'))):
if not os.path.exists(os.path.join(topology_dir, 'z-scores', size+'_'+cascade+'_'+network+'_'+organism+'_'+str(rep)+'_z_score.tsv')):
pandas_df_lst = []
print(rep, file)
report = f.analyze_exctracted_network(cfg, file, network, rep, size, stability_motifs=True)
print(report)
pandas_df_lst.append(report)
pandas_df_list = sum(pandas_df_lst)/len(pandas_df_lst)
pandas_df_list['size'] = size
pandas_df_list['p2_value'] = network
pandas_df_list['p4_value'] = organism
pandas_df_list['cascade_value'] = cascade
pandas_df_list['rep_num'] = rep
print(pandas_df_list)
pandas_df_list.to_csv(os.path.join(topology_dir, 'z-scores', size+'_'+cascade+'_'+network+'_'+organism+'_'+str(rep)+'_z_score.tsv'))
#df_topo
```
## Group-by z-scores and save as table
```
zscore_stats_lst = []
for rep, file in enumerate(glob.glob(os.path.join(topology_dir, 'z-scores', '*.tsv'))):
zscore_stats_df = pd.io.parsers.read_csv(file, sep=",", index_col=0, header=None, skiprows=1)
zscore_stats_df['motif'] = zscore_stats_df.index
zscore_stats_df.reset_index()
zscore_stats_df.columns = ['counts_ori', 'counts_rand', 'sd_rand',\
'z-score', 'p-val', 'size', 'p2', 'p4', 'cascades', 'rep_num', 'motif']
print(zscore_stats_df)
zscore_stats_lst.append(zscore_stats_df)
zscore_stats_df = pd.concat(zscore_stats_lst)
zscore_stats_df.reset_index(drop=True, inplace=True)
zscore_stats_df = zscore_stats_df[zscore_stats_df['cascades']==1]
zscore_stats_df = zscore_stats_df.drop('cascades', 1)
zscore_stats_df
zscore_stats_df_mean = zscore_stats_df.groupby(['p2', 'p4', 'motif']).mean()
zscore_stats_df_mean = zscore_stats_df_mean['z-score'].unstack()
zscore_stats_df_mean = zscore_stats_df_mean.round(3)
zscore_stats_df_mean
zscore_stats_df_std = zscore_stats_df.groupby(['p2', 'p4', 'motif']).std()
zscore_stats_df_std = zscore_stats_df_std['z-score'].unstack()
zscore_stats_df_std = zscore_stats_df_std.pow(2, axis = 1).div(n_trials).round(3)
zscore_stats_df_std
final_table_s2 = zscore_stats_df_mean.astype(str) + u"\u00B1" + zscore_stats_df_std.astype(str)
final_table_s2
final_table_s2.to_csv("s2_table_depleted_500_and_750.csv", sep="\t")
```
|
github_jupyter
|
import os, glob
import numpy as np
import pandas as pd
import sys
sys.path.insert(0, "../src")
import auxilary_functions as f
import subprocess
import csv
import matplotlib.pyplot as plt
cfg_file = "../src/config-ecoli.json"
cfg = f.get_actual_parametrization("../src/config-ecoli.json")
networks = ['fflatt']
organisms = ['ecoli']
sizes = ['500', '750']
n_trials = 10
cascades=['1']
p2=['0.5','0.7','0.9'] #0.2, 0.5, 0.8 (and 0.3?)
p4=['0.5','0.7','0.9'] #0.2, 0.5, 0.8 (and 0.3?)
os.chdir('../networks/')
fflattdir = '../snippets/'
topology_dir = os.path.join(os.getcwd(), 'fflatt_motif_depletion')
#collect data
for size in sizes:
for cascade in cascades:
for network in p2:
for organism in p4:
current_dir = os.path.join(topology_dir, size, cascade, network, organism)
if not os.path.exists(os.path.abspath(current_dir)):
print('making dirs...')
os.makedirs(os.path.abspath(current_dir), exist_ok=True)
print('running fflatt...')
subprocess.call(['python3', fflattdir+'parameter_space_exploration.py',\
cfg_file, size, str(n_trials), current_dir, network, organism, cascade])
for size in sizes:
for cascade in cascades:
for network in p2:
for organism in p4:
current_dir = os.path.join(topology_dir, size, cascade, network, organism)
for rep, file in enumerate(glob.glob(os.path.join(current_dir, '*sv'))):
if not os.path.exists(os.path.join(topology_dir, 'z-scores', size+'_'+cascade+'_'+network+'_'+organism+'_'+str(rep)+'_z_score.tsv')):
pandas_df_lst = []
print(rep, file)
report = f.analyze_exctracted_network(cfg, file, network, rep, size, stability_motifs=True)
print(report)
pandas_df_lst.append(report)
pandas_df_list = sum(pandas_df_lst)/len(pandas_df_lst)
pandas_df_list['size'] = size
pandas_df_list['p2_value'] = network
pandas_df_list['p4_value'] = organism
pandas_df_list['cascade_value'] = cascade
pandas_df_list['rep_num'] = rep
print(pandas_df_list)
pandas_df_list.to_csv(os.path.join(topology_dir, 'z-scores', size+'_'+cascade+'_'+network+'_'+organism+'_'+str(rep)+'_z_score.tsv'))
#df_topo
zscore_stats_lst = []
for rep, file in enumerate(glob.glob(os.path.join(topology_dir, 'z-scores', '*.tsv'))):
zscore_stats_df = pd.io.parsers.read_csv(file, sep=",", index_col=0, header=None, skiprows=1)
zscore_stats_df['motif'] = zscore_stats_df.index
zscore_stats_df.reset_index()
zscore_stats_df.columns = ['counts_ori', 'counts_rand', 'sd_rand',\
'z-score', 'p-val', 'size', 'p2', 'p4', 'cascades', 'rep_num', 'motif']
print(zscore_stats_df)
zscore_stats_lst.append(zscore_stats_df)
zscore_stats_df = pd.concat(zscore_stats_lst)
zscore_stats_df.reset_index(drop=True, inplace=True)
zscore_stats_df = zscore_stats_df[zscore_stats_df['cascades']==1]
zscore_stats_df = zscore_stats_df.drop('cascades', 1)
zscore_stats_df
zscore_stats_df_mean = zscore_stats_df.groupby(['p2', 'p4', 'motif']).mean()
zscore_stats_df_mean = zscore_stats_df_mean['z-score'].unstack()
zscore_stats_df_mean = zscore_stats_df_mean.round(3)
zscore_stats_df_mean
zscore_stats_df_std = zscore_stats_df.groupby(['p2', 'p4', 'motif']).std()
zscore_stats_df_std = zscore_stats_df_std['z-score'].unstack()
zscore_stats_df_std = zscore_stats_df_std.pow(2, axis = 1).div(n_trials).round(3)
zscore_stats_df_std
final_table_s2 = zscore_stats_df_mean.astype(str) + u"\u00B1" + zscore_stats_df_std.astype(str)
final_table_s2
final_table_s2.to_csv("s2_table_depleted_500_and_750.csv", sep="\t")
| 0.098947 | 0.500305 |
# Dialysis capacity model
The dialysis model runs through a defined period (e.g. one year) and simulates the progression of patients through phases of COVID infection: negative, positive (with some requiring inpatient care) and recovered or died. The speed of progression of infection through the population may be varied (typically 3-12 months).
As patients change COVID state the model seeks to place them in the appropriate unit and session, opening up COVID-positive sessions in units that allow it. COVID-positive patients do not mix with any other patients. Opening up COVID-positive sessions causes other patients to be displaced from that session, and the model seeks to reallocate them either to the same unit or, if there is no space left, to the closest alternative unit.
When allocating patients to units, the following search strategy is employed.
* *COVID negative*: First look for a place in the unit currently attended. If there is no room there, place the patient in the closest unit (judged by estimated travel time) with available space.
* *COVID-positive*: Open up sessions in units for COVID positive patients in an order specified in the input files. If a new COVID session is required, the model will displace all COVID negative patients in that session, and seek to re-allocate them according to the rules for allocating COVID negative patients. COVID-positive sessions are converted back to COVID negative sessions when they are no longer needed.
* *COVID-positive inpatient*: All inpatients are placed in Queen Alexandra Hospital, Portsmouth (though the model allows searching by travel time if another unit were to open to renal COVID-positive inpatients). If a new COVID session is required, the model will displace all COVID negative patients in that session, and seek to re-allocate them according to the rules for allocating COVID negative patients.
* *COVID-recovered*: Treat as COVID negative.
* *Unallocated patients*: If a patient cannot be allocated to any unit, the model attempts to allocate them each day.
Patients, in the model, may end up being cared for at a more distant unit than their starting unit. Once every week, the model seeks to reallocate patients back to their starting unit, or closest available unit if room in their starting unit is not available. This will also compress COVID-positive patients into as few units and sessions as possible.
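As a rough illustration of the COVID-negative placement rule above, the search can be thought of as a "current unit first, then nearest unit with space" loop. The sketch below is illustrative only; `units`, `travel_time` and the patient fields are hypothetical stand-ins, not the model's actual data structures.
```
def allocate_negative_patient(patient, units, travel_time):
    """Illustrative sketch of the COVID-negative placement search."""
    # 1. Try the unit the patient currently attends.
    current = patient['current_unit']
    if units.get(current, 0) > 0:
        units[current] -= 1          # take a chair in the current unit
        return current
    # 2. Otherwise try the remaining units in order of estimated travel time.
    for unit in sorted(units, key=lambda u: travel_time[patient['postcode_sector']][u]):
        if units[unit] > 0:
            units[unit] -= 1         # take a chair in the nearest unit with space
            return unit
    # 3. No chair anywhere: leave unallocated; the model retries each day.
    return None
```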
## Input files
### Unit capacity and use
The input file *./sim/units.csv* allows definition and use of units:
* *unit*: the name used in outputs.
* *subunit*: units may be broken down into two or more subunits. This may be done, for example, if only a part of the unit will be made available to COVID-19 positive patients.
* *Chairs*: the number of dialysis chairs available in each session.
* *inpatient*: Set to 1 for hospitals that can accept COVID positive dialysis inpatients.
* *Allow cov +*: a value of 1 indicates that sessions for that unit, or subunit, may be made available to COVID positive patients.
* *Cov +ve order*: The order in which units open up for COVID positive dialysis outpatients.
* *Mon_1 through Tues_3*: Three sessions per day on Mon/Tues (which repeat on Wed/Thurs and Fri/Sat). A 1 indicates that the session is open for booking.
### Patients
A second input file in the *./sim/* folder contains information on patients:
* *Patient ID*: Any id of patient.
* *Patient type*: Not currently used in model.
* *Postcode sector*: Home postcode sector of patient.
* *Site*: Site patient currently attends.
* *Subunit*: Allocation of patient to subunit (if subunits are used, you can simply assign patients to any of them at the beginning of the model).
* *Site postcode*: Postcode of the dialysis unit.
* *COVID status*: Can be set to *positive* if patients are known to be positive at the start of the model run.
* *first_day*: Either *Mon* or *Tues* for patients having dialysis Mon/Wed/Fri or Tues/Thurs/Sat.
* *count*: Set to 1 for all patients.
### Travel matrix
The input file *./sim/travel_matrix.csv* contains travel times (minutes) from all patient postcode sectors to all dialysis units. We used Routino (routino.org) to obtain travel times.
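Before a run, the input files can be sanity-checked by loading them with pandas. This is just a convenience snippet; the column layout follows the descriptions above, and the exact headers in the shipped CSVs may differ (`index_col=0` assumes the first column of the travel matrix holds the postcode sector).
```
import pandas as pd

# Unit capacity and session availability
units = pd.read_csv('./sim/units.csv')
print(units.head())

# Travel times (minutes) from each patient postcode sector to each unit
travel_matrix = pd.read_csv('./sim/travel_matrix.csv', index_col=0)
print(travel_matrix.shape)
```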
## Code and example
```
import sim_replicate as sim
from sim.parameters import Scenario, Uniform, Normal
```
Scenarios are defined in dictionaries as below. Multiple scenarios may be defined and all results are saved to the *./output* folder.
Parameters in the dictionary are:
* *run_length*: Length of the model run in days.
* *total_proportion_people_infected*: The proportion of patients who may be infected in the model. We assume this will be limited by herd immunity (or a vaccine).
* *time_to_infection*: The time from the start of the model run to the time patients are infected. A normal distribution is used for time to infection. The parameters applied are mean, standard deviation, and lower cut-off (use 0 to avoid negative values) in days. In scenarios we describe as 3 months, we assume that six standard deviations of the distribution (3 either side of the mean) occur in 3 months, or 90 days, so a standard deviation of 90/6 (or 15) is used.
* *time_positive*: The duration a patient is positive if they remain in outpatient dialysis. A uniform distribution is used.
* *proportion_pos_requiring_inpatient*: The proportion of infected patients who will require inpatient dialysis.
* *time_pos_before_inpatient*: For patients who will receive inpatient care, this is the time spent as a COVID positive outpatient before being hospitalised. A uniform distribution is used.
* *time_inpatient*: The length of stay as an inpatient. A uniform distribution is used.
* *mortality*: The average mortality of dialysis patients who become infected with COVID.
* *random_positive_rate_at_start*: The model allows a proportion of patients to be randomly infected at the start of the model run.
Define a scenario and the number of model runs below (the replicates will use all available CPU cores).
```
number_of_replications = 30
scenarios = {}
scenarios['base_3_month'] = Scenario(
run_length=150,
total_proportion_people_infected = 0.8,
time_to_infection = Normal(60, 15, 0.0),
time_positive = Uniform(7, 14),
proportion_pos_requiring_inpatient= 0.6,
time_pos_before_inpatient = Uniform(3,7),
time_inpatient = Uniform(7.0, 14.0),
mortality = 0.15,
random_positive_rate_at_start = 0.0
)
```
Run the scenario. Three sets of charts will be output for each scenario (and saved with scenario names in the *./output* directory):
* Numbers of patients in negative, positive outpatient, positive inpatient, and recovered/died stages of COVID.
* Number of patients displaced from their starting dialysis unit, and how much extra travel time there is to their unit of current care.
* Numbers of patients (negative/recovered, positive outpatient, positive inpatient) at each dialysis unit.
```
sim.run_replications(scenarios, number_of_replications)
```
|
github_jupyter
|
import sim_replicate as sim
from sim.parameters import Scenario, Uniform, Normal
number_of_replications = 30
scenarios = {}
scenarios['base_3_month'] = Scenario(
run_length=150,
total_proportion_people_infected = 0.8,
time_to_infection = Normal(60, 15, 0.0),
time_positive = Uniform(7, 14),
proportion_pos_requiring_inpatient= 0.6,
time_pos_before_inpatient = Uniform(3,7),
time_inpatient = Uniform(7.0, 14.0),
mortality = 0.15,
random_positive_rate_at_start = 0.0
)
sim.run_replications(scenarios, number_of_replications)
| 0.359139 | 0.979056 |
## Adversarial Autoencoders on fMRI Images for Automatic Data Generation
In this notebook and the corresponding repository, we focus on using adversarial autoencoders to learn a usable embedding of the space of sMRI images, with the aim of understanding psychiatric diseases.
This notebook was written for and executed with Python 3.5, Keras 2.1.2 and TensorFlow r1.4.
### Data Setup
We will be using specifically preprocessed data for the project from ABIDE 1&2. Assuming that the instructions have been followed, we continue by selecting the 2mm dataset and decompressing the images.
```
import os
import string
import gzip
nii_files = []
for dirpath, sf, files in os.walk('depi-dataset_01'):
if 'anat_mni_2mm.nii.gz' in files:
nii_files.append(os.path.join(dirpath, 'anat_mni_2mm.nii.gz'))
for i in nii_files:
decompressed_file = gzip.open(i)
out_path = i.replace('/','_')[:-3]
with open('depi_nii/' + out_path, 'wb') as outfile:
outfile.write(decompressed_file.read())
```
### Data Processing
Data processing for this dataset is relatively straightforward; we drop the last slice along each spatial axis (its values are always 0) so that the spatial dimensions of the tensor have a high GCD.
```
import nibabel as nib
import numpy as np
import copy
import h5py
import os
def save_large_dataset(file_name, variable):
h5f = h5py.File(file_name + '.h5', 'w')
h5f.create_dataset('variable', data=variable)
h5f.close()
indir = 'depi_nii/'
Xs = []
for root, dirs, filenames in os.walk(indir):
for f in filenames:
if '.nii' == f[-4:]:
img = nib.load(indir + f)
data = img.dataobj # Get the data object
data = data[:-1,:-1,:-1] # Clean the last dimension for a high GCD (all values are 0)
X = np.expand_dims(data, -1)
X = X / np.max(X)
X = X.astype('float32')
X = np.expand_dims(X, 0)
print('Shape: ', X.shape)
Xs.append(X)
Xa = np.vstack(Xs)
save_large_dataset('Xa', Xa)
```
### Training
We will use the [keras-adversarial library](https://github.com/bstriner/keras-adversarial) to help us with our training. We use [this example](https://github.com/bstriner/keras-adversarial/blob/master/examples/example_aae_cifar10.py) as a basis.
```
import os
import h5py
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
from keras.layers import Input, Reshape, Flatten, Lambda, Dense, Conv3D, MaxPooling3D, UpSampling3D, TimeDistributed
from keras.models import Sequential, Model
from keras.optimizers import Adam
import keras.backend as K
from keras.engine.topology import Layer
from keras_adversarial.legacy import l1l2, Dense, fit, Convolution2D
from keras_adversarial import AdversarialModel, fix_names, n_choice
from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling
from keras.layers import LeakyReLU, Activation
import tensorflow as tf
import numpy as np
class SamplingLayer2D(Layer):
def __init__(self, batch_size, n = 10, std = 1.0, **kwargs):
self.n = n
self.batch_size = batch_size
self.std = std
super(SamplingLayer2D, self).__init__(**kwargs)
def build(self, input_shape):
self.in_shape = input_shape
choices = list(np.arange(self.n))
choice_weights = np.arange(self.n) * (2 * np.pi) / (self.n)
W = np.vstack((np.cos(choice_weights), np.sin(choice_weights))).T
self.W = K.variable(value=W)
super(SamplingLayer2D, self).build(input_shape)
def call(self, x):
x = tf.tile(self.W, tf.constant([self.batch_size, 1]))
y = x + K.random_normal(shape=K.shape(x), mean = 0.0, stddev=self.std)
y = K.reshape(y, tf.stack((-1, 2)))
y = tf.random_shuffle(y)
y = y[:self.batch_size, :]
y = K.reshape(y, tf.stack((-1, 2)))
return y
def compute_output_shape(self, input_shape):
return (self.batch_size, 2)
def load_large_dataset(file_name):
h5f = h5py.File(file_name + '.h5','r')
variable = h5f['variable'][:]
h5f.close()
return variable
X = load_large_dataset('Xa')
def model_generator(latent_dim):
latent_dim_2 = 3
input_layer = Input((latent_dim,))
x = Dense(5*6*5)(input_layer)
x = Reshape((5, 6, 5, 1))(x)
x = Conv3D(16, 3, 3, 3, activation='relu', border_mode='same')(x)
x = UpSampling3D((2, 2, 2))(x)
x = Conv3D(16, 3, 3, 3, activation='relu', border_mode='same')(x)
x = UpSampling3D((3, 3, 3))(x)
x = Conv3D(16, 3, 3, 3, activation='relu', border_mode='same')(x)
x = UpSampling3D((3, 3, 3))(x)
#x = Conv3D(32, 5, 5, 5, activation='relu', border_mode='same')(x)
#x = UpSampling3D((2, 2, 2))(x)
x = Conv3D(16, 3, 3, 3, activation='relu', border_mode='same')(x)
x = Conv3D(16, 7, 7, 7, activation='relu', border_mode='same')(x)
generated = Conv3D(1, 7, 7, 7, activation='linear', border_mode='same')(x)
return Model(input_layer, generated, name='decoder')
def model_encoder(latent_dim, input_shape, reg=lambda: l1l2(1e-7, 0)):
input_layer = Input(shape=X.shape[1:]) # Create the Input Layer
x = Conv3D(8, 3, 3, 3, activation='relu', border_mode='same')(input_layer)
#x = Conv3D(8, 3, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling3D((2, 2, 2), padding='same')(x)
x = Conv3D(16, 3, 3, 3, activation='relu', border_mode='same')(x)
#x = Conv3D(16, 3, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling3D((3, 3, 3), padding='same')(x)
x = Conv3D(32, 3, 3, 3, activation='relu', border_mode='same')(x)
#x = Conv3D(32, 3, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling3D((3, 3, 3), padding='same')(x)
x = Flatten()(x)
encoded = Dense(latent_dim)(x)
#mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(x)
#log_sigma_sq = Dense(latent_dim, name="encoder_log_sigma_sq", W_regularizer=reg())(x)
#encoded = Lambda(lambda mulss : mulss[0] + K.random_normal(K.shape(mulss[0])) * K.exp(mulss[1] / 2),
# output_shape= (latent_dim,))([mu, log_sigma_sq])
return Model(input_layer, encoded, name="encoder")
def model_discriminator(latent_dim, output_dim=1, units=256, reg=lambda: l1l2(1e-7, 1e-7)):
input_layer = Input((latent_dim,))
x = Dense(512, activation = 'tanh')(input_layer)
x = Dense(64, activation = 'tanh')(x)
y = Dense(1, activation = 'sigmoid')(x)
return Model(input_layer, y)
def example_aae(path, adversarial_optimizer, latent_dim = 32):
input_shape = X.shape[1:]
# Specify the generator (z -> x)
generator = model_generator(latent_dim)
# Specify the encoder (x -> z)
encoder = model_encoder(latent_dim, input_shape)
# Combining the encoder and the generator, specify the autoencoder (x -> x')
autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
# Specify the discriminator (z -> y)
discriminator = model_discriminator(latent_dim)
# build the AAE
x = encoder.inputs[0]
z = encoder(x)
xpred = generator(z)
zreal = SamplingLayer2D(10)(x)
yreal = discriminator(zreal)
yfake = discriminator(z)
aae = Model(x, fix_names([xpred, yfake, yreal], ["xpred", "yfake", "yreal"]))
# Generate summaries for the models
generator.summary()
encoder.summary()
discriminator.summary()
autoencoder.summary()
# Build the adversarial model
generative_params = generator.trainable_weights + encoder.trainable_weights
model = AdversarialModel(base_model=aae,
player_params=[generative_params, discriminator.trainable_weights],
player_names=["generator", "discriminator"])
model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
player_optimizers=[Adam(3e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
loss={"yfake": "binary_crossentropy", "yreal": "binary_crossentropy",
"xpred": "mean_squared_error"},
player_compile_kwargs=[{"loss_weights": {"yfake": 1e-1, "yreal": 1e-1,
"xpred": 1e2}}] * 2)
# Split our data into training and testing
xtrain = X[:1000]
xtest = X[1000:1050]
# train network
# generator, discriminator; pred, yfake, yreal
n = xtrain.shape[0]
y = [xtrain, np.ones((n,)), np.zeros((n,)), xtrain, np.zeros((n,)), np.ones((n,))]
ntest = xtest.shape[0]
ytest = [xtest, np.ones((ntest,)), np.zeros((ntest,)), xtest, np.zeros((ntest,)), np.ones((ntest,))]
history = fit(model, x=xtrain, y=y, validation_data=(xtest, ytest),
callbacks=[],
nb_epoch=100, batch_size=10)
# save history
df = pd.DataFrame(history.history)
df.to_csv(os.path.join(path, "history.csv"))
# save model
encoder.save(os.path.join(path, "encoder.h5"))
generator.save(os.path.join(path, "generator.h5"))
discriminator.save(os.path.join(path, "discriminator.h5"))
example_aae("aae-smri-2", AdversarialOptimizerSimultaneous(), latent_dim=2)
```
### Testing
Here, we load up the saved models to test whether our models can produce anything visually impressive:
```
import os
from keras.layers import Input, Dense, Conv3D, MaxPooling3D, UpSampling3D, Flatten, TimeDistributed, Reshape
from keras.models import load_model
from keras.optimizers import SGD
from keras.layers import Input, Dense, Reshape, Flatten, Permute, TimeDistributed, Activation, Lambda, multiply, subtract, concatenate
from keras.layers import SimpleRNN, LSTM, GRU, Conv2D, MaxPooling2D
from keras.losses import kullback_leibler_divergence
from keras.regularizers import L1L2, Regularizer
from keras.engine.topology import Layer
from keras.models import Model
from keras import backend as K
import tensorflow as tf
import numpy as np
import h5py
def load_large_dataset(file_name):
h5f = h5py.File(file_name + '.h5','r')
variable = h5f['variable'][:]
h5f.close()
return variable
X = load_large_dataset('Xa')
latent_dim = 2
class SamplingLayer2D(Layer):
def __init__(self, batch_size, n = 10, std = 1.0, **kwargs):
self.n = n
self.batch_size = batch_size
self.std = std
super(SamplingLayer2D, self).__init__(**kwargs)
def build(self, input_shape):
self.in_shape = input_shape
choices = list(np.arange(self.n))
choice_weights = np.arange(self.n) * (2 * np.pi) / (self.n)
W = np.vstack((np.cos(choice_weights), np.sin(choice_weights))).T
self.W = K.variable(value=W)
super(SamplingLayer2D, self).build(input_shape)
def call(self, x):
#x = tf.einsum('ai,jk->ajk', x, self.W)
x = tf.tile(self.W, tf.constant([self.batch_size, 1]))
#element_choices = tf.convert_to_tensor(self.choices)
#indices = tf.multinomial(tf.log([self.weights]), self.batch_size)
#y = self.W + K.random_normal(shape=K.shape(self.W), mean = 0.0, stddev=self.std)
y = x + K.random_normal(shape=K.shape(x), mean = 0.0, stddev=self.std)
y = K.reshape(y, tf.stack((-1, 2)))
y = tf.random_shuffle(y)
y = y[:self.batch_size, :]
y = K.reshape(y, tf.stack((-1, 2)))
return y
def compute_output_shape(self, input_shape):
return (self.batch_size, 2)
data = np.zeros((96, 1))
i = Input(shape=data.shape[1:])
x = SamplingLayer2D(32, std=0.05)(i)
sampler = Model(inputs=i, outputs=[x])
sampler.compile(optimizer='sgd', loss='mse')
zsamples = sampler.predict(data, batch_size = 32)
encoder = load_model('aae-smri-2/encoder.h5')
generator = load_model('aae-smri-2/generator.h5')
#discriminator = load_model('aae-fmri/discriminator.h5')
#zsamples = np.random.normal(size=(100, latent_dim))
outs = generator.predict(zsamples, batch_size = 2)
# random_generator = generator(normal_latent_sampling((X.shape[1], latent_dim,)))(input_layer)
```
Let us first show a 2D slice from the real data:
```
from matplotlib import pyplot as plt
my_slice = X[0,:,:,30,0]
%matplotlib inline
plt.imshow(my_slice, interpolation='nearest')
plt.show()
```
Let us now show an output from the trained model:
```
from matplotlib import pyplot as plt
my_slice = outs[55,:,:,30,0]
%matplotlib inline
plt.imshow(my_slice / np.max(my_slice), interpolation='nearest')
plt.show()
```
It is alive!
|
github_jupyter
|
import os
import string
import gzip
nii_files = []
for dirpath, sf, files in os.walk('depi-dataset_01'):
if 'anat_mni_2mm.nii.gz' in files:
nii_files.append(os.path.join(dirpath, 'anat_mni_2mm.nii.gz'))
for i in nii_files:
decompressed_file = gzip.open(i)
out_path = i.replace('/','_')[:-3]
with open('depi_nii/' + out_path, 'wb') as outfile:
outfile.write(decompressed_file.read())
import nibabel as nib
import numpy as np
import copy
import h5py
import os
def save_large_dataset(file_name, variable):
h5f = h5py.File(file_name + '.h5', 'w')
h5f.create_dataset('variable', data=variable)
h5f.close()
indir = 'depi_nii/'
Xs = []
for root, dirs, filenames in os.walk(indir):
for f in filenames:
if '.nii' == f[-4:]:
img = nib.load(indir + f)
data = img.dataobj # Get the data object
data = data[:-1,:-1,:-1] # Clean the last dimension for a high GCD (all values are 0)
X = np.expand_dims(data, -1)
X = X / np.max(X)
X = X.astype('float32')
X = np.expand_dims(X, 0)
print('Shape: ', X.shape)
Xs.append(X)
Xa = np.vstack(Xs)
save_large_dataset('Xa', Xa)
import os
import h5py
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
from keras.layers import Input, Reshape, Flatten, Lambda, Dense, Conv3D, MaxPooling3D, UpSampling3D, TimeDistributed
from keras.models import Sequential, Model
from keras.optimizers import Adam
import keras.backend as K
from keras.engine.topology import Layer
from keras_adversarial.legacy import l1l2, Dense, fit, Convolution2D
from keras_adversarial import AdversarialModel, fix_names, n_choice
from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling
from keras.layers import LeakyReLU, Activation
import tensorflow as tf
import numpy as np
class SamplingLayer2D(Layer):
def __init__(self, batch_size, n = 10, std = 1.0, **kwargs):
self.n = n
self.batch_size = batch_size
self.std = std
super(SamplingLayer2D, self).__init__(**kwargs)
def build(self, input_shape):
self.in_shape = input_shape
choices = list(np.arange(self.n))
choice_weights = np.arange(self.n) * (2 * np.pi) / (self.n)
W = np.vstack((np.cos(choice_weights), np.sin(choice_weights))).T
self.W = K.variable(value=W)
super(SamplingLayer2D, self).build(input_shape)
def call(self, x):
x = tf.tile(self.W, tf.constant([self.batch_size, 1]))
y = x + K.random_normal(shape=K.shape(x), mean = 0.0, stddev=self.std)
y = K.reshape(y, tf.stack((-1, 2)))
y = tf.random_shuffle(y)
y = y[:self.batch_size, :]
y = K.reshape(y, tf.stack((-1, 2)))
return y
def compute_output_shape(self, input_shape):
return (self.batch_size, 2)
def load_large_dataset(file_name):
h5f = h5py.File(file_name + '.h5','r')
variable = h5f['variable'][:]
h5f.close()
return variable
X = load_large_dataset('Xa')
def model_generator(latent_dim):
latent_dim_2 = 3
input_layer = Input((latent_dim,))
x = Dense(5*6*5)(input_layer)
x = Reshape((5, 6, 5, 1))(x)
x = Conv3D(16, 3, 3, 3, activation='relu', border_mode='same')(x)
x = UpSampling3D((2, 2, 2))(x)
x = Conv3D(16, 3, 3, 3, activation='relu', border_mode='same')(x)
x = UpSampling3D((3, 3, 3))(x)
x = Conv3D(16, 3, 3, 3, activation='relu', border_mode='same')(x)
x = UpSampling3D((3, 3, 3))(x)
#x = Conv3D(32, 5, 5, 5, activation='relu', border_mode='same')(x)
#x = UpSampling3D((2, 2, 2))(x)
x = Conv3D(16, 3, 3, 3, activation='relu', border_mode='same')(x)
x = Conv3D(16, 7, 7, 7, activation='relu', border_mode='same')(x)
generated = Conv3D(1, 7, 7, 7, activation='linear', border_mode='same')(x)
return Model(input_layer, generated, name='decoder')
def model_encoder(latent_dim, input_shape, reg=lambda: l1l2(1e-7, 0)):
input_layer = Input(shape=X.shape[1:]) # Create the Input Layer
x = Conv3D(8, 3, 3, 3, activation='relu', border_mode='same')(input_layer)
#x = Conv3D(8, 3, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling3D((2, 2, 2), padding='same')(x)
x = Conv3D(16, 3, 3, 3, activation='relu', border_mode='same')(x)
#x = Conv3D(16, 3, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling3D((3, 3, 3), padding='same')(x)
x = Conv3D(32, 3, 3, 3, activation='relu', border_mode='same')(x)
#x = Conv3D(32, 3, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling3D((3, 3, 3), padding='same')(x)
x = Flatten()(x)
encoded = Dense(latent_dim)(x)
#mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(x)
#log_sigma_sq = Dense(latent_dim, name="encoder_log_sigma_sq", W_regularizer=reg())(x)
#encoded = Lambda(lambda mulss : mulss[0] + K.random_normal(K.shape(mulss[0])) * K.exp(mulss[1] / 2),
# output_shape= (latent_dim,))([mu, log_sigma_sq])
return Model(input_layer, encoded, name="encoder")
def model_discriminator(latent_dim, output_dim=1, units=256, reg=lambda: l1l2(1e-7, 1e-7)):
input_layer = Input((latent_dim,))
x = Dense(512, activation = 'tanh')(input_layer)
x = Dense(64, activation = 'tanh')(x)
y = Dense(1, activation = 'sigmoid')(x)
return Model(input_layer, y)
def example_aae(path, adversarial_optimizer, latent_dim = 32):
input_shape = X.shape[1:]
# Specify the generator (z -> x)
generator = model_generator(latent_dim)
# Specify the encoder (x -> z)
encoder = model_encoder(latent_dim, input_shape)
# Combining the encoder and the generator, specify the autoencoder (x -> x')
autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
# Specify the discriminator (z -> y)
discriminator = model_discriminator(latent_dim)
# build the AAE
x = encoder.inputs[0]
z = encoder(x)
xpred = generator(z)
zreal = SamplingLayer2D(10)(x)
yreal = discriminator(zreal)
yfake = discriminator(z)
aae = Model(x, fix_names([xpred, yfake, yreal], ["xpred", "yfake", "yreal"]))
# Generate summaries for the models
generator.summary()
encoder.summary()
discriminator.summary()
autoencoder.summary()
# Build the adversarial model
generative_params = generator.trainable_weights + encoder.trainable_weights
model = AdversarialModel(base_model=aae,
player_params=[generative_params, discriminator.trainable_weights],
player_names=["generator", "discriminator"])
model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
player_optimizers=[Adam(3e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
loss={"yfake": "binary_crossentropy", "yreal": "binary_crossentropy",
"xpred": "mean_squared_error"},
player_compile_kwargs=[{"loss_weights": {"yfake": 1e-1, "yreal": 1e-1,
"xpred": 1e2}}] * 2)
# Split our data into training and testing
xtrain = X[:1000]
xtest = X[1000:1050]
# train network
# generator, discriminator; pred, yfake, yreal
n = xtrain.shape[0]
y = [xtrain, np.ones((n,)), np.zeros((n,)), xtrain, np.zeros((n,)), np.ones((n,))]
ntest = xtest.shape[0]
ytest = [xtest, np.ones((ntest,)), np.zeros((ntest,)), xtest, np.zeros((ntest,)), np.ones((ntest,))]
history = fit(model, x=xtrain, y=y, validation_data=(xtest, ytest),
callbacks=[],
nb_epoch=100, batch_size=10)
# save history
df = pd.DataFrame(history.history)
df.to_csv(os.path.join(path, "history.csv"))
# save model
encoder.save(os.path.join(path, "encoder.h5"))
generator.save(os.path.join(path, "generator.h5"))
discriminator.save(os.path.join(path, "discriminator.h5"))
example_aae("aae-smri-2", AdversarialOptimizerSimultaneous(), latent_dim=2)
import os
from keras.layers import Input, Dense, Conv3D, MaxPooling3D, UpSampling3D, Flatten, TimeDistributed, Reshape
from keras.models import load_model
from keras.optimizers import SGD
from keras.layers import Input, Dense, Reshape, Flatten, Permute, TimeDistributed, Activation, Lambda, multiply, subtract, concatenate
from keras.layers import SimpleRNN, LSTM, GRU, Conv2D, MaxPooling2D
from keras.losses import kullback_leibler_divergence
from keras.regularizers import L1L2, Regularizer
from keras.engine.topology import Layer
from keras.models import Model
from keras import backend as K
import tensorflow as tf
import numpy as np
import h5py
def load_large_dataset(file_name):
h5f = h5py.File(file_name + '.h5','r')
variable = h5f['variable'][:]
h5f.close()
return variable
X = load_large_dataset('Xa')
latent_dim = 2
class SamplingLayer2D(Layer):
def __init__(self, batch_size, n = 10, std = 1.0, **kwargs):
self.n = n
self.batch_size = batch_size
self.std = std
super(SamplingLayer2D, self).__init__(**kwargs)
def build(self, input_shape):
self.in_shape = input_shape
choices = list(np.arange(self.n))
choice_weights = np.arange(self.n) * (2 * np.pi) / (self.n)
W = np.vstack((np.cos(choice_weights), np.sin(choice_weights))).T
self.W = K.variable(value=W)
super(SamplingLayer2D, self).build(input_shape)
def call(self, x):
#x = tf.einsum('ai,jk->ajk', x, self.W)
x = tf.tile(self.W, tf.constant([self.batch_size, 1]))
#element_choices = tf.convert_to_tensor(self.choices)
#indices = tf.multinomial(tf.log([self.weights]), self.batch_size)
#y = self.W + K.random_normal(shape=K.shape(self.W), mean = 0.0, stddev=self.std)
y = x + K.random_normal(shape=K.shape(x), mean = 0.0, stddev=self.std)
y = K.reshape(y, tf.stack((-1, 2)))
y = tf.random_shuffle(y)
y = y[:self.batch_size, :]
y = K.reshape(y, tf.stack((-1, 2)))
return y
def compute_output_shape(self, input_shape):
return (self.batch_size, 2)
data = np.zeros((96, 1))
i = Input(shape=data.shape[1:])
x = SamplingLayer2D(32, std=0.05)(i)
sampler = Model(inputs=i, outputs=[x])
sampler.compile(optimizer='sgd', loss='mse')
zsamples = sampler.predict(data, batch_size = 32)
encoder = load_model('aae-smri-2/encoder.h5')
generator = load_model('aae-smri-2/generator.h5')
#discriminator = load_model('aae-fmri/discriminator.h5')
#zsamples = np.random.normal(size=(100, latent_dim))
outs = generator.predict(zsamples, batch_size = 2)
# random_generator = generator(normal_latent_sampling((X.shape[1], latent_dim,)))(input_layer)
from matplotlib import pyplot as plt
my_slice = X[0,:,:,30,0]
%matplotlib inline
plt.imshow(my_slice, interpolation='nearest')
plt.show()
from matplotlib import pyplot as plt
my_slice = outs[55,:,:,30,0]
%matplotlib inline
plt.imshow(my_slice / np.max(my_slice), interpolation='nearest')
plt.show()
| 0.587707 | 0.910027 |
# Context-Aware Neural Network Model
## Convert PyTorch Models to TensorFlow
```
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import matplotlib.pyplot as plt
import torch.optim as optim
import tensorflow as tf
import torch.nn as nn
import numpy as np
import torch
import onnx
import time
import os
import sys
import cv2
from onnx_tf.backend import prepare
sys.path.insert(1, '/home/ubuntu/mayub/Github/Context-Aware_Crowd_Counting-pytorch/')
import cannet as cann_model
MODEL_PARAM_PATH = '/home/ubuntu/mayub/Github/Context-Aware_Crowd_Counting-pytorch/cvpr2019_CAN_SHHA_353.pth'
model= cann_model.CANNet()
device=torch.device("cpu")
model.load_state_dict(torch.load(MODEL_PARAM_PATH, map_location=torch.device('cpu')))
model.to(device)
model.eval()
print(model)
```
### Preprocess the Image for CANNet model (context aware model)
```
def read_and_convert_img(image_path):
img=plt.imread(image_path)/255
print(len(img.shape))
if len(img.shape)==2:
# expand grayscale image to three channel.
img=img[:,:,np.newaxis]
img=np.concatenate((img,img,img),2)
print(img.shape[0])
print(img.shape[1])
ds_rows=int(img.shape[0]//8) # Downsampling to match model size
ds_cols=int(img.shape[1]//8)
print(ds_rows)
print(ds_cols)
img = cv2.resize(img,(ds_cols*8,ds_rows*8))
print(img.shape)
img=img.transpose((2,0,1)) # convert to order (channel,rows,cols)
img_tensor=torch.tensor(img,dtype=torch.float)
img_tensor=transforms.functional.normalize(img_tensor,mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
img_tensor=img_tensor.view(1,img_tensor.shape[0],img_tensor.shape[1],img_tensor.shape[2])
print(img.shape)
print(img_tensor.shape)
#img_tensor = np.expand_dims(img_tensor,axis = 0)
return img_tensor
```
### Check Input Image Dimensions
```
image_input= read_and_convert_img('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/data/shanghaitech/part_A_final/test_data/images/IMG_1.jpg')
```
### Run the Prediction on sample image to test model
```
image_input=image_input.to(device)
et_dmap=model(image_input)
print(et_dmap.data.sum())
```
### Export the model in ONNX format
```
# Export to ONNX format
torch.onnx.export(model, image_input, './model_simple.onnx', input_names=['image_input'], output_names=['image_output'],operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
```
### Prepare the model for TensorFlow export
```
# Load ONNX model and convert to TensorFlow format
model_onnx = onnx.load('./model_simple.onnx')
# tf_rep = prepare(model_onnx)
# Export model as .pb file
#tf_rep.export_graph('./model_simple.pb')
tf_rep = prepare(model_onnx,device='CPU')
```
## FAILS!! The ATen op is not implemented in ONNX
```
prepare()
```
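To see exactly which operators block the conversion, one option is to list the op types present in the exported graph using the standard `onnx` protobuf API; any `ATen` fallback nodes that show up here are the ones `onnx-tf` cannot translate. This is a diagnostic sketch, not a fix.
```
from collections import Counter
import onnx

model_onnx = onnx.load('./model_simple.onnx')
# Count every op type in the exported graph
op_counts = Counter(node.op_type for node in model_onnx.graph.node)
print(op_counts)

# The 'ATen' entries are the PyTorch fallback ops with no ONNX/onnx-tf mapping
aten_nodes = [node for node in model_onnx.graph.node if node.op_type == 'ATen']
print(f'{len(aten_nodes)} ATen fallback node(s) in the graph')
```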
|
github_jupyter
|
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import matplotlib.pyplot as plt
import torch.optim as optim
import tensorflow as tf
import torch.nn as nn
import numpy as np
import torch
import onnx
import time
import os
import sys
import cv2
from onnx_tf.backend import prepare
sys.path.insert(1, '/home/ubuntu/mayub/Github/Context-Aware_Crowd_Counting-pytorch/')
import cannet as cann_model
MODEL_PARAM_PATH = '/home/ubuntu/mayub/Github/Context-Aware_Crowd_Counting-pytorch/cvpr2019_CAN_SHHA_353.pth'
model= cann_model.CANNet()
device=torch.device("cpu")
model.load_state_dict(torch.load(MODEL_PARAM_PATH, map_location=torch.device('cpu')))
model.to(device)
model.eval()
print(model)
def read_and_convert_img(image_path):
img=plt.imread(image_path)/255
print(len(img.shape))
if len(img.shape)==2:
# expand grayscale image to three channel.
img=img[:,:,np.newaxis]
img=np.concatenate((img,img,img),2)
print(img.shape[0])
print(img.shape[1])
ds_rows=int(img.shape[0]//8) # Downsampling to match model size
ds_cols=int(img.shape[1]//8)
print(ds_rows)
print(ds_cols)
img = cv2.resize(img,(ds_cols*8,ds_rows*8))
print(img.shape)
img=img.transpose((2,0,1)) # convert to order (channel,rows,cols)
img_tensor=torch.tensor(img,dtype=torch.float)
img_tensor=transforms.functional.normalize(img_tensor,mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
img_tensor=img_tensor.view(1,img_tensor.shape[0],img_tensor.shape[1],img_tensor.shape[2])
print(img.shape)
print(img_tensor.shape)
#img_tensor = np.expand_dims(img_tensor,axis = 0)
return img_tensor
image_input= read_and_convert_img('/home/ubuntu/CrowdSourcing_Projects/CSRNet-keras/data/shanghaitech/part_A_final/test_data/images/IMG_1.jpg')
image_input=image_input.to(device)
et_dmap=model(image_input)
print(et_dmap.data.sum())
# Export to ONNX format
torch.onnx.export(model, image_input, './model_simple.onnx', input_names=['image_input'], output_names=['image_output'],operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
# Load ONNX model and convert to TensorFlow format
model_onnx = onnx.load('./model_simple.onnx')
# tf_rep = prepare(model_onnx)
# Export model as .pb file
#tf_rep.export_graph('./model_simple.pb')
tf_rep = prepare(model_onnx,device='CPU')
prepare()
| 0.45302 | 0.862178 |
```
from ipywidgets import interactive, interact
import matplotlib.pyplot as plt
import numpy as np
import ipywidgets as widgets
import sympy as sym
import seaborn as sns
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
from numba import jit
init_notebook_mode(connected=True)
jit(nopython=True, parallel=True)
sns.set()
```
# Interactive partial sums of sine
Zoufiné Lauer-Baré
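For reference, the SymPy expression in the code builds the Maclaurin partial sums

$$S_n(x) = \sum_{k=0}^{n} \frac{(-1)^{k}\, x^{2k+1}}{(2k+1)!},$$

so increasing the slider order $n$ should make the partial-sum curve track $\sin(x)$ over a wider range of $x$.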
```
class plot():
def __init__(self, preWidgetN):
self.N = preWidgetN
x,y,n ,k = sym.symbols('x, y,n,k', real=True)
X=np.linspace(0, 10, 100)
f = sym.Sum((-1)**k*(x**(2*k+1))/(sym.factorial(2*k+1)),(k,0, n))
#f = sym.Sum((-1)**k*(x**(2*k))/(sym.factorial(2*k)),(k,0, n))
#print(sym.latex(f))
f = f.subs(n, self.N.value)
f = sym.lambdify(x, f)
self.trace1 = go.Scatter(x=X, y=np.sin(X),
mode='lines+markers',
name='sin'
)
self.trace2 = go.Scatter(x=X, y=f(X),
mode='lines',
name=r'$\sum_{k=0}^{%s} \frac{\left(-1\right)^{k} x^{2 k + 1}}{\left(2 k + 1\right)!}$' %(self.N.value)
)
layout = go.Layout(template='plotly_dark', title="Partial sums of sine")
self.fig = go.FigureWidget(data=[self.trace1, self.trace2],
layout = layout,
layout_yaxis_range=[-3 , 3],
)
#self.fig.update_layout(title="Plot Title",)
def sineSeries(self, change):
x,y,n ,k = sym.symbols('x, y,n,k', real=True)
X=np.linspace(0, 10, 100)
f = sym.Sum((-1)**k*(x**(2*k+1))/(sym.factorial(2*k+1)),(k,0, n))
#f = sym.Sum((-1)**k*(x**(2*k))/(sym.factorial(2*k)),(k,0, n))
f = f.subs(n, self.N.value)
f = sym.lambdify(x, f)
with self.fig.batch_update():
self.fig.data[1].x = X
self.fig.data[1].y = f(X)
self.fig.data[1].name = r'$\sum_{k=0}^{%s} \frac{\left(-1\right)^{k} x^{2 k + 1}}{\left(2 k + 1\right)!}$' %(self.N.value)
return
def show(self):
self.N.observe(self.sineSeries, names='value')
display(self.N, self.fig)
return
N = widgets.IntSlider(min=0, max=20, step=1, value=0, description='partial sum order')
p = plot(N)
p.show()
```
|
github_jupyter
|
from ipywidgets import interactive, interact
import matplotlib.pyplot as plt
import numpy as np
import ipywidgets as widgets
import sympy as sym
import seaborn as sns
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
from numba import jit
init_notebook_mode(connected=True)
jit(nopython=True, parallel=True)
sns.set()
class plot():
def __init__(self, preWidgetN):
self.N = preWidgetN
x,y,n ,k = sym.symbols('x, y,n,k', real=True)
X=np.linspace(0, 10, 100)
f = sym.Sum((-1)**k*(x**(2*k+1))/(sym.factorial(2*k+1)),(k,0, n))
#f = sym.Sum((-1)**k*(x**(2*k))/(sym.factorial(2*k)),(k,0, n))
#print(sym.latex(f))
f = f.subs(n, self.N.value)
f = sym.lambdify(x, f)
self.trace1 = go.Scatter(x=X, y=np.sin(X),
mode='lines+markers',
name='sin'
)
self.trace2 = go.Scatter(x=X, y=f(X),
mode='lines',
name=r'$\sum_{k=0}^{%s} \frac{\left(-1\right)^{k} x^{2 k + 1}}{\left(2 k + 1\right)!}$' %(self.N.value)
)
layout = go.Layout(template='plotly_dark', title="Partial sums of sine")
self.fig = go.FigureWidget(data=[self.trace1, self.trace2],
layout = layout,
layout_yaxis_range=[-3 , 3],
)
#self.fig.update_layout(title="Plot Title",)
def sineSeries(self, change):
x,y,n ,k = sym.symbols('x, y,n,k', real=True)
X=np.linspace(0, 10, 100)
f = sym.Sum((-1)**k*(x**(2*k+1))/(sym.factorial(2*k+1)),(k,0, n))
#f = sym.Sum((-1)**k*(x**(2*k))/(sym.factorial(2*k)),(k,0, n))
f = f.subs(n, self.N.value)
f = sym.lambdify(x, f)
with self.fig.batch_update():
self.fig.data[1].x = X
self.fig.data[1].y = f(X)
self.fig.data[1].name = r'$\sum_{k=0}^{%s} \frac{\left(-1\right)^{k} x^{2 k + 1}}{\left(2 k + 1\right)!}$' %(self.N.value)
return
def show(self):
self.N.observe(self.sineSeries, names='value')
display(self.N, self.fig)
return
N = widgets.IntSlider(min=0, max=20, step=1, value=0, description='partial sum order')
p = plot(N)
p.show()
| 0.320396 | 0.713931 |
# Analyzing changes in game usage due to TV CM viewing, using propensity scores
```
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
%config InlineBackend.figure_format = 'retina'
%load_ext autoreload
%autoreload 2
from pycalf import metrics
from pycalf import visualize
from pycalf import propensity
```
First, we load the sample data.
```
# Download from https://raw.githubusercontent.com/iwanami-datascience/vol3/master/kato%26hoshino/q_data_x.csv
df = pd.read_csv('sample/q_data_x.csv')
df.head()
```
To estimate the propensity score, we define the covariates, the outcome variables, and the treatment variable.
```
# Define variables required for inference.
covariate_cols = [
'TVwatch_day', 'age', 'sex', 'marry_dummy', 'child_dummy', 'inc', 'pmoney',
'area_kanto', 'area_tokai', 'area_keihanshin', 'job_dummy1', 'job_dummy2',
'job_dummy3', 'job_dummy4', 'job_dummy5', 'job_dummy6', 'job_dummy7',
'fam_str_dummy1', 'fam_str_dummy2', 'fam_str_dummy3', 'fam_str_dummy4'
]
outcome_cols = ['gamecount', 'gamedummy', 'gamesecond']
treatment_col = 'cm_dummy'
```
Since logistic regression is used as the propensity score model, we scale the covariates.
Then we define the model that uses IPW (inverse probability weighting).
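As a reminder (standard IPW, not specific to pycalf): with estimated propensity score $e(X_i) = P(Z_i = 1 \mid X_i)$, the ATE weights are

$$w_i = \frac{Z_i}{e(X_i)} + \frac{1 - Z_i}{1 - e(X_i)},$$

so that the treated and untreated groups are each reweighted to resemble the full population.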
```
# Set Values from dataframe.
X = df[covariate_cols]
y = df[outcome_cols]
treatment = df[treatment_col].astype(bool).to_numpy()
# Scaling Raw Data.
scaler = preprocessing.MinMaxScaler()
scaled_X = scaler.fit_transform(X)
# Define IPW Class.
learner = LogisticRegression(solver='lbfgs', max_iter=1000, random_state=42)
model = propensity.IPW(learner)
# Fit model.
model.fit(scaled_X, treatment)
```
### Checking covariate adjustment with effect size d
We use effect size d to check whether the imbalance of the covariates has been adjusted by IPW.
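Here, effect size d refers to the (weighted) standardized mean difference of each covariate between the treated and untreated groups, commonly defined as

$$d = \frac{\bar{x}_{1} - \bar{x}_{0}}{\sqrt{\left(s_{1}^{2} + s_{0}^{2}\right)/2}},$$

where $\bar{x}_{z}$ and $s_{z}^{2}$ are the group mean and variance. Values near 0 (a common rule of thumb is $|d| < 0.1$) indicate good balance after weighting; pycalf's exact implementation may differ in details.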
```
ate_weight = model.get_weight(treatment, mode='ate')
es = metrics.EffectSize()
es.fit(X, treatment, weight=ate_weight)
es.transform()
visualize.plot_effect_size(X, treatment, weight=ate_weight, ascending=True)
```
### Visualizing the AUC and the propensity score distributions
Here we visualize the ROC curve (AUC) and the distribution of propensity scores by treatment status.
An AUC of 0.7 or higher is generally considered desirable.
Reference: https://www.jstage.jst.go.jp/article/tenrikiyo/19/2/19_19-008/_pdf
The propensity score distributions of the treated and untreated groups overlap to some extent while remaining distinguishable, so adjustment with the propensity score looks feasible.
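The cell below prints the F1 score and plots the ROC curve; if you also want the AUC as a single number, a quick check along these lines should work, assuming `model.get_score()` returns the estimated propensity scores (as it is used in the plotting calls):
```
# Optional extra check (not part of the original analysis): numeric AUC of the propensity model
from sklearn.metrics import roc_auc_score
print('AUC:', roc_auc_score(treatment, model.get_score()))
```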
```
print('F1 Score: ', metrics.f1_score(treatment, model.get_score(), threshold='auto'))
visualize.plot_roc_curve(treatment, model.get_score())
visualize.plot_probability_distribution(treatment, model.get_score())
```
### Average Treatment Effect (ATE)
The treatment effect after adjustment with IPW.
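Conceptually, the IPW estimate of the ATE compares the weighted mean outcomes of the two groups,

$$\widehat{\mathrm{ATE}} = \frac{\sum_i Z_i Y_i / e(X_i)}{\sum_i Z_i / e(X_i)} - \frac{\sum_i (1-Z_i) Y_i / (1-e(X_i))}{\sum_i (1-Z_i) / (1-e(X_i))},$$

which corresponds to the `Z1` and `Z0` columns and their difference shown below (the exact estimator used by `estimate_effect` may be the unnormalized variant).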
```
outcome = model.estimate_effect(treatment, y.to_numpy(), mode='ate')
pd.DataFrame(outcome, index=['Z0', 'Z1', 'ATE'], columns=y.columns.tolist()).T
outcome_name = 'gamesecond'
z0, z1, treat_effect = model.estimate_effect(treatment, y[outcome_name].to_numpy(), mode='ate')
visualize.plot_treatment_effect(outcome_name, z0, z1, treat_effect.round())
```
### Estimating treatment effects by attribute variables
```
# Attribute Effect
treatment_col = 'cm_dummy'
y = 'gamesecond'
features = [
'child_dummy', 'area_kanto', 'area_keihan', 'area_tokai', 'area_keihanshin',
'T', 'F1', 'F2', 'F3', 'M1', 'M2', 'M3'
]
attr_effect = metrics.AttributeEffect()
attr_effect.fit(df[features], df[treatment_col], df[y], weight=model.get_weight('ate'))
result = attr_effect.transform()
display(result)
attr_effect.plot_lift_values()
```
|
github_jupyter
|
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
%config InlineBackend.figure_format = 'retina'
%load_ext autoreload
%autoreload 2
from pycalf import metrics
from pycalf import visualize
from pycalf import propensity
# Download from https://raw.githubusercontent.com/iwanami-datascience/vol3/master/kato%26hoshino/q_data_x.csv
df = pd.read_csv('sample/q_data_x.csv')
df.head()
# Define variables required for inference.
covariate_cols = [
'TVwatch_day', 'age', 'sex', 'marry_dummy', 'child_dummy', 'inc', 'pmoney',
'area_kanto', 'area_tokai', 'area_keihanshin', 'job_dummy1', 'job_dummy2',
'job_dummy3', 'job_dummy4', 'job_dummy5', 'job_dummy6', 'job_dummy7',
'fam_str_dummy1', 'fam_str_dummy2', 'fam_str_dummy3', 'fam_str_dummy4'
]
outcome_cols = ['gamecount', 'gamedummy', 'gamesecond']
treatment_col = 'cm_dummy'
# Set Values from dataframe.
X = df[covariate_cols]
y = df[outcome_cols]
treatment = df[treatment_col].astype(bool).to_numpy()
# Scaling Raw Data.
scaler = preprocessing.MinMaxScaler()
scaled_X = scaler.fit_transform(X)
# Define IPW Class.
learner = LogisticRegression(solver='lbfgs', max_iter=1000, random_state=42)
model = propensity.IPW(learner)
# Fit model.
model.fit(scaled_X, treatment)
ate_weight = model.get_weight(treatment, mode='ate')
es = metrics.EffectSize()
es.fit(X, treatment, weight=ate_weight)
es.transform()
visualize.plot_effect_size(X, treatment, weight=ate_weight, ascending=True)
print('F1 Score: ', metrics.f1_score(treatment, model.get_score(), threshold='auto'))
visualize.plot_roc_curve(treatment, model.get_score())
visualize.plot_probability_distribution(treatment, model.get_score())
outcome = model.estimate_effect(treatment, y.to_numpy(), mode='ate')
pd.DataFrame(outcome, index=['Z0', 'Z1', 'ATE'], columns=y.columns.tolist()).T
outcome_name = 'gamesecond'
z0, z1, treat_effect = model.estimate_effect(treatment, y[outcome_name].to_numpy(), mode='ate')
visualize.plot_treatment_effect(outcome_name, z0, z1, treat_effect.round())
# Attribute Effect
treatment_col = 'cm_dummy'
y = 'gamesecond'
features = [
'child_dummy', 'area_kanto', 'area_keihan', 'area_tokai', 'area_keihanshin',
'T', 'F1', 'F2', 'F3', 'M1', 'M2', 'M3'
]
attr_effect = metrics.AttributeEffect()
attr_effect.fit(df[features], df[treatment_col], df[y], weight=model.get_weight('ate'))
result = attr_effect.transform()
display(result)
attr_effect.plot_lift_values()
| 0.705176 | 0.878105 |
```
import glob
import gzip
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
from scipy import stats
import sys
from Bio.Seq import Seq
from collections import Counter
import plotly.express as px
import plotly.graph_objects as go
import plotly.offline as offline
from plotly.subplots import make_subplots
import seaborn as sns
import matrix_transform
import visualize
%matplotlib inline
sns.set(font="Arial")
sns.set_theme(style="ticks")
colors = ['#D81B60', '#1E88E5', '#FFC107', '#31B547']
sns.set_palette(sns.color_palette(colors))
folder = 'Data/combined_raw_counts/'
empty = []
for residue in range(306):
path_dir = folder + "res" + str(residue+1) + ".csv"
test = pd.read_csv(path_dir)
test['gc_mean'] = (test['gc1']+test['gc2'])/2
test['glu_mean'] = (test['glu1']+test['glu2'])/2
test['gal_mean'] = (test['gal1']+test['gal2'])/2
test['grl_mean'] = (test['grl1']+test['grl2'])/2
test = test.loc[test['glu1']!=test['glu1'].max()]
corr_df = test.corr()
empty.append([residue+1, corr_df['gal_mean'].loc['glu_mean'],
corr_df['gal_mean'].loc['grl_mean'],
corr_df['gal_mean'].loc['gc_mean'],
corr_df['glu_mean'].loc['gc_mean'],
corr_df['glu_mean'].loc['grl_mean'],
corr_df['gc_mean'].loc['grl_mean'],
])
empty_df = pd.DataFrame(empty)
empty_df.columns = ['residue','gal:glu', 'gal:grl', 'gal:gc',
'glu:gc', 'glu:grl', 'gc:grl']
empty_df.to_csv('CSVs/correlations_per_res.csv')
bins = np.linspace(-1, 1, 20)
plt.hist([empty_df['gal:glu'][0:140].append(empty_df['gal:glu'][149:242]),
empty_df['gal:glu'][242:].append(empty_df['gal:glu'][140:149])],
bins, label=['x', 'y'])
plt.show()
bins = np.linspace(0, 1, 20)
plt.hist([empty_df['gal:gc'][0:140].append(empty_df['gal:gc'][149:242]),
empty_df['gal:gc'][242:].append(empty_df['gal:gc'][140:149])],
bins, label=['x', 'y'])
plt.show()
bins = np.linspace(-1, 1, 20)
plt.hist([empty_df['gal:grl'][0:140].append(empty_df['gal:grl'][149:242]),
empty_df['gal:grl'][242:].append(empty_df['gal:grl'][140:149])],
bins, label=['x', 'y'])
plt.show()
bins = np.linspace(0, 1, 20)
plt.hist([empty_df['glu:grl'][0:140].append(empty_df['glu:grl'][149:242]),
empty_df['glu:grl'][242:].append(empty_df['glu:grl'][140:149])],
bins, label=['x', 'y'])
plt.show()
bins = np.linspace(0, 1, 20)
plt.hist([empty_df['glu:gc'][0:140].append(empty_df['glu:gc'][149:242]),
empty_df['glu:gc'][242:].append(empty_df['glu:gc'][140:149])],
bins, label=['x', 'y'])
plt.show()
```
### Percent wildtype
```
raw_count_folder = 'Data/combined_raw_count_foldchange/'
all_percentages = []
for x in range(1,307):
file = 'res' + str(x) + '.csv'
files = pd.read_csv(raw_count_folder+file, index_col = 0)
percentages = []
for col in ['glu1_counts', 'gal1_counts', 'gc1_counts', 'grl1_counts',
'glu2_counts', 'gal2_counts', 'gc2_counts', 'grl2_counts']:
percentages.append(files[col].max())
all_percentages.append(percentages)
percentage_wt = pd.DataFrame(all_percentages)
percentage_wt.columns = ('glu1', 'gal1', 'gc1', 'grl1',
'glu2', 'gal2', 'gc2', 'grl2')
```
## Percent stop codon
```
raw_count_folder = 'Data/combined_raw_count_foldchange/'
cols = ['glu1_counts', 'gal1_counts', 'gc1_counts', 'grl1_counts',
'glu2_counts', 'gal2_counts', 'gc2_counts', 'grl2_counts']
all_percentages = []
for x in range(1,307):
file = 'res' + str(x) + '.csv'
files = pd.read_csv(raw_count_folder+file, index_col = 0)
stop_sum = files[files['site_2'].apply(lambda x: Seq(x).translate())=='*'][cols].sum()
col_sum = files[cols].sum()
all_percentages.append(list(stop_sum))
number_stop = pd.DataFrame(all_percentages)
number_stop.columns = ('glu1', 'gal1', 'gc1', 'grl1',
'glu2', 'gal2', 'gc2', 'grl2')
files[files['glu1_counts'] == files['glu1_counts'].max()]['glu1_counts']/\
files[files['glu1_counts'] == files['glu1_counts'].max()]['gal1_counts']
```
### No synonymous codings
```
folder = 'Data/combined_raw_counts/'
empty = []
for residue in range(306):
path_dir = folder + "res" + str(residue+1) + ".csv"
test = pd.read_csv(path_dir)
test['gc_mean'] = (test['gc1']+test['gc2'])/2
test['glu_mean'] = (test['glu1']+test['glu2'])/2
test['gal_mean'] = (test['gal1']+test['gal2'])/2
test['grl_mean'] = (test['grl1']+test['grl2'])/2
test = test.loc[test['glu1']!=test['glu1'].max()]
corr_df = test.corr()
empty.append([residue+1, corr_df['gal_mean'].loc['glu_mean'],
corr_df['gal_mean'].loc['grl_mean'],
corr_df['gal_mean'].loc['gc_mean'],
corr_df['glu_mean'].loc['gc_mean'],
corr_df['glu_mean'].loc['grl_mean'],
corr_df['gc_mean'].loc['grl_mean'],
])
```
|
github_jupyter
|
import glob
import gzip
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
from scipy import stats
import sys
from Bio.Seq import Seq
from collections import Counter
import plotly.express as px
import plotly.graph_objects as go
import plotly.offline as offline
from plotly.subplots import make_subplots
import seaborn as sns
import matrix_transform
import visualize
%matplotlib inline
sns.set(font="Arial")
sns.set_theme(style="ticks")
colors = ['#D81B60', '#1E88E5', '#FFC107', '#31B547']
sns.set_palette(sns.color_palette(colors))
folder = 'Data/combined_raw_counts/'
empty = []
for residue in range(306):
path_dir = folder + "res" + str(residue+1) + ".csv"
test = pd.read_csv(path_dir)
test['gc_mean'] = (test['gc1']+test['gc2'])/2
test['glu_mean'] = (test['glu1']+test['glu2'])/2
test['gal_mean'] = (test['gal1']+test['gal2'])/2
test['grl_mean'] = (test['grl1']+test['grl2'])/2
test = test.loc[test['glu1']!=test['glu1'].max()]
corr_df = test.corr()
empty.append([residue+1, corr_df['gal_mean'].loc['glu_mean'],
corr_df['gal_mean'].loc['grl_mean'],
corr_df['gal_mean'].loc['gc_mean'],
corr_df['glu_mean'].loc['gc_mean'],
corr_df['glu_mean'].loc['grl_mean'],
corr_df['gc_mean'].loc['grl_mean'],
])
empty_df = pd.DataFrame(empty)
empty_df.columns = ['residue','gal:glu', 'gal:grl', 'gal:gc',
'glu:gc', 'glu:grl', 'gc:grl']
empty_df.to_csv('CSVs/correlations_per_res.csv')
bins = np.linspace(-1, 1, 20)
plt.hist([empty_df['gal:glu'][0:140].append(empty_df['gal:glu'][149:242]),
empty_df['gal:glu'][242:].append(empty_df['gal:glu'][140:149])],
bins, label=['x', 'y'])
plt.show()
bins = np.linspace(0, 1, 20)
plt.hist([empty_df['gal:gc'][0:140].append(empty_df['gal:gc'][149:242]),
empty_df['gal:gc'][242:].append(empty_df['gal:gc'][140:149])],
bins, label=['x', 'y'])
plt.show()
bins = np.linspace(-1, 1, 20)
plt.hist([empty_df['gal:grl'][0:140].append(empty_df['gal:grl'][149:242]),
empty_df['gal:grl'][242:].append(empty_df['gal:grl'][140:149])],
bins, label=['x', 'y'])
plt.show()
bins = np.linspace(0, 1, 20)
plt.hist([empty_df['glu:grl'][0:140].append(empty_df['glu:grl'][149:242]),
empty_df['glu:grl'][242:].append(empty_df['glu:grl'][140:149])],
bins, label=['x', 'y'])
plt.show()
bins = np.linspace(0, 1, 20)
plt.hist([empty_df['glu:gc'][0:140].append(empty_df['glu:gc'][149:242]),
empty_df['glu:gc'][242:].append(empty_df['glu:gc'][140:149])],
bins, label=['x', 'y'])
plt.show()
raw_count_folder = 'Data/combined_raw_count_foldchange/'
all_percentages = []
for x in range(1,307):
file = 'res' + str(x) + '.csv'
files = pd.read_csv(raw_count_folder+file, index_col = 0)
percentages = []
for col in ['glu1_counts', 'gal1_counts', 'gc1_counts', 'grl1_counts',
'glu2_counts', 'gal2_counts', 'gc2_counts', 'grl2_counts']:
percentages.append(files[col].max())
all_percentages.append(percentages)
percentage_wt = pd.DataFrame(all_percentages)
percentage_wt.columns = ('glu1', 'gal1', 'gc1', 'grl1',
'glu2', 'gal2', 'gc2', 'grl2')
raw_count_folder = 'Data/combined_raw_count_foldchange/'
cols = ['glu1_counts', 'gal1_counts', 'gc1_counts', 'grl1_counts',
'glu2_counts', 'gal2_counts', 'gc2_counts', 'grl2_counts']
all_percentages = []
for x in range(1,307):
file = 'res' + str(x) + '.csv'
files = pd.read_csv(raw_count_folder+file, index_col = 0)
stop_sum = files[files['site_2'].apply(lambda x: Seq(x).translate())=='*'][cols].sum()
col_sum = files[cols].sum()
all_percentages.append(list(stop_sum))
number_stop = pd.DataFrame(all_percentages)
number_stop.columns = ('glu1', 'gal1', 'gc1', 'grl1',
'glu2', 'gal2', 'gc2', 'grl2')
files[files['glu1_counts'] == files['glu1_counts'].max()]['glu1_counts']/\
files[files['glu1_counts'] == files['glu1_counts'].max()]['gal1_counts']
folder = 'Data/combined_raw_counts/'
empty = []
for residue in range(306):
path_dir = folder + "res" + str(residue+1) + ".csv"
test = pd.read_csv(path_dir)
test['gc_mean'] = (test['gc1']+test['gc2'])/2
test['glu_mean'] = (test['glu1']+test['glu2'])/2
test['gal_mean'] = (test['gal1']+test['gal2'])/2
test['grl_mean'] = (test['grl1']+test['grl2'])/2
test = test.loc[test['glu1']!=test['glu1'].max()]
corr_df = test.corr()
empty.append([residue+1, corr_df['gal_mean'].loc['glu_mean'],
corr_df['gal_mean'].loc['grl_mean'],
corr_df['gal_mean'].loc['gc_mean'],
corr_df['glu_mean'].loc['gc_mean'],
corr_df['glu_mean'].loc['grl_mean'],
corr_df['gc_mean'].loc['grl_mean'],
])
| 0.105193 | 0.512937 |
# Scroll down to get to the interesting tables...
# Construct list of properties of widgets
"Properties" here is one of:
+ `keys`
+ `traits()`
+ `class_own_traits()`
Common (i.e. uninteresting) properties are filtered out.
The dependency on astropy is for their Table. Replace it with pandas if you want (a rough pandas sketch is included after `table_for_keys` below).
```
import itertools
from ipywidgets import *
from IPython.display import display
from traitlets import TraitError
from astropy.table import Table, Column
```
# Function definitions
## Calculate "interesting" properties
```
def properties(widget, omit=None, source=None):
"""
Return a list of widget properties for a widget instance, omitting
common properties.
Parameters
----------
widget : ipywidgets.Widget instance
        The widget for which the list of properties is desired.
omit : list, optional
List of properties to omit in the return value. Default is
        ``['layout', 'style', 'msg_throttle']``; for ``source='traits'`` it
        is extended to add ``['keys', 'comm']``.
    source : str, one of 'keys', 'traits', 'class_own_traits', 'style_keys', optional
Source of property list for widget. Default is ``'keys'``.
"""
if source is None:
source = 'keys'
valid_sources = ('keys', 'traits', 'class_own_traits', 'style_keys')
if source not in valid_sources:
raise ValueError('source must be one of {}'.format(', '.join(valid_sources)))
if omit is None:
omit = ['layout', 'style', 'msg_throttle']
if source == 'keys':
props = widget.keys
elif source == 'traits':
props = widget.traits()
omit.extend(['keys', 'comm'])
elif source == 'class_own_traits':
props = widget.class_own_traits()
elif source == 'style_keys':
props = widget.style.keys
props = [k for k in props if not k.startswith('_')]
return [k for k in props if k not in omit]
```
## Create a table (cross-tab style) for which properties are available for which widgets
This is the only place astropy.table.Table is used, so delete if you want to.
```
def table_for_keys(keys, keys_info, source):
unique_keys = set()
for k in keys:
unique_keys.update(keys_info[k])
unique_keys = sorted(unique_keys)
string_it = lambda x: 'X' if x else ''
colnames = ['Property ({})'.format(source)] + keys
columns = [Column(name=colnames[0], data=unique_keys)]
for c in colnames[1:]:
column = Column(name=c, data=[string_it(k in key_dict[c]) for k in unique_keys])
columns.append(column)
return Table(columns)
```
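If you would rather avoid the astropy dependency (as suggested at the top of the notebook), a rough pandas equivalent of the same cross-tab logic might look like this; it is an untested sketch that assumes the same inputs:
```
import pandas as pd

def table_for_keys_pd(keys, keys_info, source):
    # Union of all properties across the selected widgets
    unique_keys = sorted(set().union(*(keys_info[k] for k in keys)))
    data = {'Property ({})'.format(source): unique_keys}
    for name in keys:
        # 'X' marks properties present on that widget
        data[name] = ['X' if k in keys_info[name] else '' for k in unique_keys]
    return pd.DataFrame(data)
```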
## List of widget objects...
```
widget_list = [
IntSlider,
FloatSlider,
IntRangeSlider,
FloatRangeSlider,
IntProgress,
FloatProgress,
BoundedIntText,
BoundedFloatText,
IntText,
FloatText,
ToggleButton,
Checkbox,
Valid,
Dropdown,
RadioButtons,
Select,
SelectionSlider,
SelectionRangeSlider,
ToggleButtons,
SelectMultiple,
Text,
Textarea,
Label,
HTML,
HTMLMath,
Image,
Button,
Play,
DatePicker,
ColorPicker,
Box,
HBox,
VBox,
Accordion,
Tab
]
```
## ...and their names
```
names = [wd.__name__ for wd in widget_list]
```
## Figure out the properties for each widget
The `try`/`except` below is to catch a couple of classes that *require* that `options` be passed on initialization.
```
property_source = 'keys'
all_keys = []
for widget_class in widget_list:
try:
keys = properties(widget_class(), source=property_source)
except TraitError as e:
keys = properties(widget_class(options=(2,10)), source=property_source)
finally:
all_keys.append(keys)
```
Probably should have used a dict from the beginning...
```
key_dict = {k: v for k, v in zip(names, all_keys)}
```
## Define a few groups of widgets by widget interface type
This makes for nicer (i.e. more compact and readable) tables later on.
```
sliders = [k for k in key_dict.keys() if 'Slider' in k]
buttons = [k for k in key_dict.keys() if 'Button' in k]
containers = ['Box', 'VBox', 'HBox', 'Accordion', 'Tab']
texts = [k for k in names if 'text' in k or 'Text' in k] + [k for k in names if 'HTML' in k] + ['Label']
progress = [k for k in names if 'Progress' in k]
selects = ['Dropdown', 'Select', 'SelectMultiple']
all_so_far = sliders + buttons + texts + containers + progress + selects
others = [k for k in names if k not in all_so_far]
slider_keys = set()
```
# Tables of keys (synced properties)
## Sliders
```
table_for_keys(sliders, key_dict, source=property_source)
```
## Buttons
```
table_for_keys(buttons, key_dict, source=property_source)
```
## Containers
```
table_for_keys(containers, key_dict, source=property_source)
```
## Text
```
table_for_keys(texts, key_dict, source=property_source)
```
## Progress bars
```
table_for_keys(progress, key_dict, source=property_source)
```
## Select widgets
```
table_for_keys(selects, key_dict, source=property_source)
```
## Everything else
```
table_for_keys(others, key_dict, source=property_source)
```
## Style keys
```
property_source = 'style_keys'
style_keys = []
for widget_class in widget_list:
try:
keys = properties(widget_class(), source=property_source)
except TraitError as e:
keys = properties(widget_class(options=(2,10)), source=property_source)
except AttributeError:
keys=''
finally:
style_keys.append(keys)
for w, s in zip(names, style_keys):
print('{} has style keys: {}'.format(w, ', '.join(s)))
```
|
github_jupyter
|
import itertools
from ipywidgets import *
from IPython.display import display
from traitlets import TraitError
from astropy.table import Table, Column
def properties(widget, omit=None, source=None):
"""
Return a list of widget properties for a widget instance, omitting
common properties.
Parameters
----------
widget : ipywidgets.Widget instance
        The widget for which the list of properties is desired.
omit : list, optional
List of properties to omit in the return value. Default is
        ``['layout', 'style', 'msg_throttle']``; for ``source='traits'`` it
        is extended to add ``['keys', 'comm']``.
    source : str, one of 'keys', 'traits', 'class_own_traits', 'style_keys', optional
Source of property list for widget. Default is ``'keys'``.
"""
if source is None:
source = 'keys'
valid_sources = ('keys', 'traits', 'class_own_traits', 'style_keys')
if source not in valid_sources:
raise ValueError('source must be one of {}'.format(', '.join(valid_sources)))
if omit is None:
omit = ['layout', 'style', 'msg_throttle']
if source == 'keys':
props = widget.keys
elif source == 'traits':
props = widget.traits()
omit.extend(['keys', 'comm'])
elif source == 'class_own_traits':
props = widget.class_own_traits()
elif source == 'style_keys':
props = widget.style.keys
props = [k for k in props if not k.startswith('_')]
return [k for k in props if k not in omit]
def table_for_keys(keys, keys_info, source):
unique_keys = set()
for k in keys:
unique_keys.update(keys_info[k])
unique_keys = sorted(unique_keys)
string_it = lambda x: 'X' if x else ''
colnames = ['Property ({})'.format(source)] + keys
columns = [Column(name=colnames[0], data=unique_keys)]
for c in colnames[1:]:
column = Column(name=c, data=[string_it(k in key_dict[c]) for k in unique_keys])
columns.append(column)
return Table(columns)
widget_list = [
IntSlider,
FloatSlider,
IntRangeSlider,
FloatRangeSlider,
IntProgress,
FloatProgress,
BoundedIntText,
BoundedFloatText,
IntText,
FloatText,
ToggleButton,
Checkbox,
Valid,
Dropdown,
RadioButtons,
Select,
SelectionSlider,
SelectionRangeSlider,
ToggleButtons,
SelectMultiple,
Text,
Textarea,
Label,
HTML,
HTMLMath,
Image,
Button,
Play,
DatePicker,
ColorPicker,
Box,
HBox,
VBox,
Accordion,
Tab
]
names = [wd.__name__ for wd in widget_list]
property_source = 'keys'
all_keys = []
for widget_class in widget_list:
try:
keys = properties(widget_class(), source=property_source)
except TraitError as e:
keys = properties(widget_class(options=(2,10)), source=property_source)
finally:
all_keys.append(keys)
key_dict = {k: v for k, v in zip(names, all_keys)}
sliders = [k for k in key_dict.keys() if 'Slider' in k]
buttons = [k for k in key_dict.keys() if 'Button' in k]
containers = ['Box', 'VBox', 'HBox', 'Accordion', 'Tab']
texts = [k for k in names if 'text' in k or 'Text' in k] + [k for k in names if 'HTML' in k] + ['Label']
progress = [k for k in names if 'Progress' in k]
selects = ['Dropdown', 'Select', 'SelectMultiple']
all_so_far = sliders + buttons + texts + containers + progress + selects
others = [k for k in names if k not in all_so_far]
slider_keys = set()
table_for_keys(sliders, key_dict, source=property_source)
table_for_keys(buttons, key_dict, source=property_source)
table_for_keys(containers, key_dict, source=property_source)
table_for_keys(texts, key_dict, source=property_source)
table_for_keys(progress, key_dict, source=property_source)
table_for_keys(selects, key_dict, source=property_source)
table_for_keys(others, key_dict, source=property_source)
property_source = 'style_keys'
style_keys = []
for widget_class in widget_list:
try:
keys = properties(widget_class(), source=property_source)
except TraitError as e:
keys = properties(widget_class(options=(2,10)), source=property_source)
except AttributeError:
keys=''
finally:
style_keys.append(keys)
for w, s in zip(names, style_keys):
print('{} has style keys: {}'.format(w, ', '.join(s)))
| 0.6508 | 0.835215 |
```
import yahoo_fin.stock_info as si
import pandas as pd
%matplotlib inline
dow_jones_tickers = si.tickers_dow()
prices = list(map(si.get_live_price, dow_jones_tickers))
for ticker in dow_jones_tickers:
    si.get_data(ticker)['close'].plot(kind='line')
import yahoo_fin.stock_info as si
dow_table = pd.DataFrame([si.tickers_dow(), list(map(si.get_live_price, si.tickers_dow()))])
dow_table = dow_table.transpose()
dow_table.columns = ['Company', 'Price']
dow_table
# Extracted from https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average
company_names = [
'3M',
'American Express',
'Apple',
'Boeing',
'Caterpillar',
'Chevron',
'Cisco Systems',
'Coca-Cola',
'Dow Inc.',
'ExxonMobil',
'Goldman Sachs',
'The Home Depot',
'IBM',
'Intel',
'Johnson & Johnson',
'JPMorgan Chase',
'McDonald\'s',
'Merck & Company',
'Microsoft',
'Nike',
'Pfizer',
'Procter & Gamble',
'Travelers',
'UnitedHealth Group',
'United Technologies',
'Verizon',
'Visa',
'Walmart',
'Walgreens Boots Alliance',
'Walt Disney',
]
symbol = [
'MMM',
'AXP',
'AAPL',
'BA',
'CAT',
'CVX',
'CSCO',
'KO',
'DOW',
'XOM',
'GS',
'HD',
'IBM',
'INTC',
'JNJ',
'JPM',
'MCD',
'MRK',
'MSFT',
'NKE',
'PFE',
'PG',
'TRV',
'UNH',
'UTX',
'VZ',
'V',
'WMT',
'WBA',
'DIS',
]
industry = [
'Conglomerate',
'Financial services',
'Information technologies',
'Aerospace and defense',
'Construction and mining equipment',
'Oil & gas',
'Information technologies',
'Food',
'Chemical industry',
'Oil & gas',
'Financial services',
'Retail',
'Information technologies',
'Information technologies',
'Pharmaceuticals',
'Financial services',
'Food',
'Pharmaceuticals',
'Information technologies',
'Apparel',
'Pharmaceuticals',
'Consumer goods',
'Insurance',
'Managed health care',
'Conglomerate',
'Telecommunication',
'Financial services',
'Retail',
'Retail',
'Broadcasting and entertainment'
]
ticker2name = {k:v for k,v in zip(symbol, company_names) }
ticker2industry = {k:v for k,v in zip(symbol, industry) }
powers = {'T': 10 ** 12, 'B': 10 ** 9, 'M': 10 ** 6}
def f(s):
try:
power = s[-1]
return float(s[:-1]) * powers[power]
except TypeError:
return s
def get_stats(ticker):
stats = si.get_stats(ticker)
values = list(stats['Value'])
atb = list(stats['Attribute'])
return values, atb
def create_dow_table(index):
tickers = si.tickers_dow()
table = pd.DataFrame(columns=['Company', 'Price'] + get_stats(tickers[0])[1], index=tickers)
table['Company'] = tickers
table['Price'] = list(map(si.get_live_price, table['Company']))
table[table.columns.drop(['Company', 'Price'])] = list(map(lambda x: get_stats(x)[0], table['Company']))
return table
dow = create_dow_table('dow')
dow['Cap'] = dow['Market Cap (intraday) 5'].apply(f)
dow.index = dow['Company'].apply(lambda x: ticker2name[x])
dow['Industry'] = dow['Company'].apply(lambda x: ticker2industry[x])
dow
dow.plot.pie(y='Cap', figsize=(20,20))
dow[['Industry', 'Cap']].groupby('Industry').agg('sum').plot.pie(y='Cap', figsize=(20,20), title='Dow Jones')
```
|
github_jupyter
|
import yahoo_fin.stock_info as si
import pandas as pd
%matplotlib inline
dow_jones_tickers = si.tickers_dow()
prices = list(map(si.get_live_price, dow_jones_tickers))
for ticker in dow_jones_tickers:
    si.get_data(ticker)['close'].plot(kind='line')
import yahoo_fin.stock_info as si
dow_table = pd.DataFrame([si.tickers_dow(), list(map(si.get_live_price, si.tickers_dow()))])
dow_table = dow_table.transpose()
dow_table.columns = ['Company', 'Price']
dow_table
# Extracted from https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average
company_names = [
'3M',
'American Express',
'Apple',
'Boeing',
'Caterpillar',
'Chevron',
'Cisco Systems',
'Coca-Cola',
'Dow Inc.',
'ExxonMobil',
'Goldman Sachs',
'The Home Depot',
'IBM',
'Intel',
'Johnson & Johnson',
'JPMorgan Chase',
'McDonald\'s',
'Merck & Company',
'Microsoft',
'Nike',
'Pfizer',
'Procter & Gamble',
'Travelers',
'UnitedHealth Group',
'United Technologies',
'Verizon',
'Visa',
'Walmart',
'Walgreens Boots Alliance',
'Walt Disney',
]
symbol = [
'MMM',
'AXP',
'AAPL',
'BA',
'CAT',
'CVX',
'CSCO',
'KO',
'DOW',
'XOM',
'GS',
'HD',
'IBM',
'INTC',
'JNJ',
'JPM',
'MCD',
'MRK',
'MSFT',
'NKE',
'PFE',
'PG',
'TRV',
'UNH',
'UTX',
'VZ',
'V',
'WMT',
'WBA',
'DIS',
]
industry = [
'Conglomerate',
'Financial services',
'Information technologies',
'Aerospace and defense',
'Construction and mining equipment',
'Oil & gas',
'Information technologies',
'Food',
'Chemical industry',
'Oil & gas',
'Financial services',
'Retail',
'Information technologies',
'Information technologies',
'Pharmaceuticals',
'Financial services',
'Food',
'Pharmaceuticals',
'Information technologies',
'Apparel',
'Pharmaceuticals',
'Consumer goods',
'Insurance',
'Managed health care',
'Conglomerate',
'Telecommunication',
'Financial services',
'Retail',
'Retail',
'Broadcasting and entertainment'
]
ticker2name = {k:v for k,v in zip(symbol, company_names) }
ticker2industry = {k:v for k,v in zip(symbol, industry) }
powers = {'T': 10 ** 12, 'B': 10 ** 9, 'M': 10 ** 6}
def f(s):
try:
power = s[-1]
return float(s[:-1]) * powers[power]
except TypeError:
return s
def get_stats(ticker):
stats = si.get_stats(ticker)
values = list(stats['Value'])
atb = list(stats['Attribute'])
return values, atb
def create_dow_table(index):
tickers = si.tickers_dow()
table = pd.DataFrame(columns=['Company', 'Price'] + get_stats(tickers[0])[1], index=tickers)
table['Company'] = tickers
table['Price'] = list(map(si.get_live_price, table['Company']))
table[table.columns.drop(['Company', 'Price'])] = list(map(lambda x: get_stats(x)[0], table['Company']))
return table
dow = create_dow_table('dow')
dow['Cap'] = dow['Market Cap (intraday) 5'].apply(f)
dow.index = dow['Company'].apply(lambda x: ticker2name[x])
dow['Industry'] = dow['Company'].apply(lambda x: ticker2industry[x])
dow
dow.plot.pie(y='Cap', figsize=(20,20))
dow[['Industry', 'Cap']].groupby('Industry').agg('sum').plot.pie(y='Cap', figsize=(20,20), title='Dow Jones')
| 0.34632 | 0.54577 |
# Lab 01: Introduction to PyTorch
```
# For Google Colaboratory
import sys, os
if 'google.colab' in sys.modules:
from google.colab import drive
drive.mount('/content/gdrive')
file_name = 'pytorch_introduction.ipynb'
import subprocess
path_to_file = subprocess.check_output('find . -type f -name ' + str(file_name), shell=True).decode("utf-8")
print(path_to_file)
path_to_file = path_to_file.replace(file_name,"").replace('\n',"")
os.chdir(path_to_file)
!pwd
import torch
```
### PyTorch Tensors
### Construct a vector of 3 elements
```
x=torch.Tensor( [5.3 , 2.1 , -3.1 ] )
print(x)
```
### Construct a 2 x 2 matrix
```
A=torch.Tensor( [ [5.3,2.1] , [0.2,2.1] ] )
print(A)
```
### Construct a random 10 x 2 matrix
```
A=torch.rand(10,2)
print(A)
```
### Construct a 10 x 2 matrix filled with zeros
```
A=torch.zeros(10,2)
print(A)
```
### Construct a 5 x 2 x 2 random Tensor
```
B = torch.rand(5,2,2)
print(B)
B.size()
B.dim()
B.type()
```
### Size and Dimension of a Tensor
#### A 3-dimensional Tensor
```
A=torch.rand(3,2,2)
print(A)
print( A.dim() )
print( A.size() )
print( A.size(0) )
```
#### A 2-dimensional Tensor
```
B=torch.rand(3,5)
print(B)
print( B.dim() )
print( B.size() )
print( B.size(0) )
print( B.size(1) )
```
#### A 1-dimensional Tensor
```
x=torch.rand(7)
print(x)
print( x.dim() )
print( x.size() )
```
### Adding and multiplying tensors
```
A=torch.rand(2,2)
B=torch.rand(2,2)
C=2*B
D=A+C
E=A*B
print(A)
print('')
print(B)
print('')
print(C)
print('')
print(D)
print('')
print(E)
```
### Floats versus integers
```
x=torch.Tensor([1.2 , 2.5])
print(x)
print(x.type())
y=torch.LongTensor([5,6])
print(y)
print(y.type())
y=y.float()
print(y)
print(y.type())
x=x.long()
print(x)
print(x.type())
```
### Other functions
```
x=torch.arange(10)
print(x)
print(x.type())
x=torch.randperm(10)
print(x)
print(x.type())
x=torch.arange(10).long()
print(x)
print(x.type())
```
### Tips
Check tensor sizes for algebra computations, e.g. the matrix multiplication torch.mm(X1, X2), with X1.size() and X2.size().
Check the tensor type for data manipulations with X.type().
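For example (a small illustration added for clarity; X1 and X2 below are placeholder tensors):
```
X1 = torch.rand(5, 3)
X2 = torch.rand(3, 2)
print(X1.size(), X2.size())   # torch.Size([5, 3]) torch.Size([3, 2]) -> inner dimensions match
print(X1.type(), X2.type())   # both torch.FloatTensor
Y = torch.mm(X1, X2)          # matrix product of size 5 x 2
print(Y.size())
```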
### Reshaping a tensor
```
x=torch.arange(10)
print(x)
print( x.view(2,5) )
print( x.view(5,2) )
```
### Note that the original tensor x was NOT modified
```
print(x)
```
### To make the change permanent you need to create a new tensor
```
y=x.view(5,2)
print(x)
print('')
print(y)
```
### Slicing a tensor
```
print( y )
print( y[0] )
print( y[1] )
v = y[2]
print(v)
```
### Extract row 1 (included) to row 4 (excluded)
```
print(y)
print( y[1:4] )
idx = 1
n=3
print( y[idx:idx+n] )
```
### Let's check the sizes after slicing
```
print(y)
z= y[1:1+3]
print(z)
print('')
print('dimension=',z.dim())
print(z.size())
v=y[1]
print(v)
print('')
print('dimension=',v.dim())
print(v.size())
```
### Accessing the entries of a Tensor
```
print(y)
print(y)
a=y[4,0]
print(a) # a is a 0-dimensional tensor (a scalar)
print(a.dim())
print(a.size())
```
### A matrix is a 2-dimensional Tensor
### A row of a matrix is a 1-dimensional Tensor
### An entry of a matrix is a 0-dimensional Tensor
### 0-dimensional Tensors are scalars!
### If we want to convert a 0-dimensional Tensor into a Python number, we need to use item()
```
b=a.item()
print(a)
print(type(a))
print(b)
print(type(b))
```
|
github_jupyter
|
# For Google Colaboratory
import sys, os
if 'google.colab' in sys.modules:
from google.colab import drive
drive.mount('/content/gdrive')
file_name = 'pytorch_introduction.ipynb'
import subprocess
path_to_file = subprocess.check_output('find . -type f -name ' + str(file_name), shell=True).decode("utf-8")
print(path_to_file)
path_to_file = path_to_file.replace(file_name,"").replace('\n',"")
os.chdir(path_to_file)
!pwd
import torch
x=torch.Tensor( [5.3 , 2.1 , -3.1 ] )
print(x)
A=torch.Tensor( [ [5.3,2.1] , [0.2,2.1] ] )
print(A)
A=torch.rand(10,2)
print(A)
A=torch.zeros(10,2)
print(A)
B = torch.rand(5,2,2)
print(B)
B.size()
B.dim()
B.type()
A=torch.rand(3,2,2)
print(A)
print( A.dim() )
print( A.size() )
print( A.size(0) )
B=torch.rand(3,5)
print(B)
print( B.dim() )
print( B.size() )
print( B.size(0) )
print( B.size(1) )
x=torch.rand(7)
print(x)
print( x.dim() )
print( x.size() )
A=torch.rand(2,2)
B=torch.rand(2,2)
C=2*B
D=A+C
E=A*B
print(A)
print('')
print(B)
print('')
print(C)
print('')
print(D)
print('')
print(E)
x=torch.Tensor([1.2 , 2.5])
print(x)
print(x.type())
y=torch.LongTensor([5,6])
print(y)
print(y.type())
y=y.float()
print(y)
print(y.type())
x=x.long()
print(x)
print(x.type())
x=torch.arange(10)
print(x)
print(x.type())
x=torch.randperm(10)
print(x)
print(x.type())
x=torch.arange(10).long()
print(x)
print(x.type())
x=torch.arange(10)
print(x)
print( x.view(2,5) )
print( x.view(5,2) )
print(x)
y=x.view(5,2)
print(x)
print('')
print(y)
print( y )
print( y[0] )
print( y[1] )
v = y[2]
print(v)
print(y)
print( y[1:4] )
idx = 1
n=3
print( y[idx:idx+n] )
print(y)
z= y[1:1+3]
print(z)
print('')
print('dimension=',z.dim())
print(z.size())
v=y[1]
print(v)
print('')
print('dimension=',v.dim())
print(v.size())
print(y)
print(y)
a=y[4,0]
print(a) # a is a 0-dimensional tensor (a scalar)
print(a.dim())
print(a.size())
b=a.item()
print(a)
print(type(a))
print(b)
print(type(b))
| 0.216012 | 0.916185 |
<a href="https://colab.research.google.com/github/datadynamo/aiconf_sj_2019_pytorch/blob/master/05_Transfer_Learning_Tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
%matplotlib inline
```
```
# This is formatted as code
```
This notebook is a copy of https://pytorch.org/tutorials/_downloads/transfer_learning_tutorial.ipynb
Modifications have been made to accommodate the Google Colab data attachment.
---------------------------------
Transfer Learning Tutorial
==========================
**Author**: `Sasank Chilamkurthy <https://chsasank.github.io>`_
In this tutorial, you will learn how to train your network using
transfer learning. You can read more about the transfer learning at `cs231n
notes <http://cs231n.github.io/transfer-learning/>`__
Quoting these notes,
In practice, very few people train an entire Convolutional Network
from scratch (with random initialization), because it is relatively
rare to have a dataset of sufficient size. Instead, it is common to
pretrain a ConvNet on a very large dataset (e.g. ImageNet, which
contains 1.2 million images with 1000 categories), and then use the
ConvNet either as an initialization or a fixed feature extractor for
the task of interest.
These two major transfer learning scenarios look as follows:
- **Finetuning the convnet**: Instead of random initialization, we
initialize the network with a pretrained network, like the one that is
trained on imagenet 1000 dataset. Rest of the training looks as
usual.
- **ConvNet as fixed feature extractor**: Here, we will freeze the weights
for all of the network except that of the final fully connected
layer. This last fully connected layer is replaced with a new one
with random weights and only this layer is trained.
```
# License: BSD
# Author: Sasank Chilamkurthy
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
```
Load Data
---------
We will use torchvision and torch.utils.data packages for loading the
data.
The problem we're going to solve today is to train a model to classify
**ants** and **bees**. We have about 120 training images each for ants and bees.
There are 75 validation images for each class. Usually, this is a very
small dataset to generalize upon, if trained from scratch. Since we
are using transfer learning, we should be able to generalize reasonably
well.
This dataset is a very small subset of imagenet.
.. Note ::
Download the data from
`here <https://download.pytorch.org/tutorial/hymenoptera_data.zip>`_
and extract it to the current directory.
```
!gsutil cp gs://pytorchtutorial/hymenoptera_data.zip /tmp/hymenoptera_data.zip
!unzip /tmp/hymenoptera_data.zip
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = 'hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=True, num_workers=4)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
```
Visualize a few images
^^^^^^^^^^^^^^^^^^^^^^
Let's visualize a few training images so as to understand the data
augmentations.
```
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
```
Training the model
------------------
Now, let's write a general function to train a model. Here, we will
illustrate:
- Scheduling the learning rate
- Saving the best model
In the following, parameter ``scheduler`` is an LR scheduler object from
``torch.optim.lr_scheduler``.
```
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
scheduler.step()
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
```
Visualizing the model predictions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Generic function to display predictions for a few images
```
def visualize_model(model, num_images=6):
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure()
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataloaders['val']):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
for j in range(inputs.size()[0]):
images_so_far += 1
ax = plt.subplot(num_images//2, 2, images_so_far)
ax.axis('off')
ax.set_title('predicted: {}'.format(class_names[preds[j]]))
imshow(inputs.cpu().data[j])
if images_so_far == num_images:
model.train(mode=was_training)
return
model.train(mode=was_training)
```
Finetuning the convnet
----------------------
Load a pretrained model and reset final fully connected layer.
```
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
```
Train and evaluate
^^^^^^^^^^^^^^^^^^
It should take around 15-25 min on CPU. On GPU though, it takes less than a
minute.
```
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=2)
visualize_model(model_ft)
```
ConvNet as fixed feature extractor
----------------------------------
Here, we need to freeze all the network except the final layer. We need
to set ``requires_grad == False`` to freeze the parameters so that the
gradients are not computed in ``backward()``.
You can read more about this in the documentation
`here <http://pytorch.org/docs/notes/autograd.html#excluding-subgraphs-from-backward>`__.
```
model_conv = torchvision.models.resnet18(pretrained=True)
for param in model_conv.parameters():
param.requires_grad = False
# Parameters of newly constructed modules have requires_grad=True by default
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, 2)
model_conv = model_conv.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that only parameters of final layer are being optimized as
# opposed to before.
optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)
```
Train and evaluate
^^^^^^^^^^^^^^^^^^
On CPU this will take about half the time compared to the previous scenario.
This is expected as gradients don't need to be computed for most of the
network. However, the forward pass still needs to be computed.
```
model_conv = train_model(model_conv, criterion, optimizer_conv,
exp_lr_scheduler, num_epochs=2)
visualize_model(model_conv)
plt.ioff()
plt.show()
```
|
github_jupyter
|
%matplotlib inline
# This is formatted as code
# License: BSD
# Author: Sasank Chilamkurthy
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
!gsutil cp gs://pytorchtutorial/hymenoptera_data.zip /tmp/hymenoptera_data.zip
!unzip /tmp/hymenoptera_data.zip
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = 'hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=True, num_workers=4)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
scheduler.step()
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def visualize_model(model, num_images=6):
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure()
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataloaders['val']):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
for j in range(inputs.size()[0]):
images_so_far += 1
ax = plt.subplot(num_images//2, 2, images_so_far)
ax.axis('off')
ax.set_title('predicted: {}'.format(class_names[preds[j]]))
imshow(inputs.cpu().data[j])
if images_so_far == num_images:
model.train(mode=was_training)
return
model.train(mode=was_training)
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=2)
visualize_model(model_ft)
model_conv = torchvision.models.resnet18(pretrained=True)
for param in model_conv.parameters():
param.requires_grad = False
# Parameters of newly constructed modules have requires_grad=True by default
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, 2)
model_conv = model_conv.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that only parameters of final layer are being optimized as
# opposed to before.
optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)
model_conv = train_model(model_conv, criterion, optimizer_conv,
exp_lr_scheduler, num_epochs=2)
visualize_model(model_conv)
plt.ioff()
plt.show()
| 0.883676 | 0.989879 |
# 02 - Detect Model Bias
Let's first import our dataset and pre-process it:
```
import pandas as pd
# Load dataset into dataframe
loan_dataset = pd.read_csv("../datasets/loan.csv")
loan_dataset.head()
# Prepare categorical and numeric features
categorical_features = ["sex", "rent", "minority", "ZIP", "occupation"]
numeric_features = [
"education", "age", "income", "loan_size", "payment_timing",
"year", "job_stability"
]
for cat in categorical_features:
loan_dataset[cat] = loan_dataset[cat].astype("object")
```
We first need to define which variable is going to be our __outcome variable__ (the one we want to predict), and which are going to be our __sensitive features__ (those that the modeler should take into account when evaluating the fairness of the data or algorithm).
```
# Define outcome variable:
pred = "default"
# Define sensitive features:
sensitive_features = ["minority", "sex"]
```
Let's now get our data into the right format to be the input of a machine learning algorithm in scikit-learn:
```
# Define X and y
X = loan_dataset.copy().drop([pred], axis=1)
y = (loan_dataset.copy()[pred] != f"{pred}-no").astype(int).values
```
We can now run a __logistic regression__ to predict our outcome variable:
```
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
# Create preprocessor of features
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler())]
)
categorical_transformer = OneHotEncoder(handle_unknown='ignore')
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)
]
)
# Create pipeline
clf = Pipeline(
steps=[
('preprocessor', preprocessor),
('classifier', LogisticRegression())
]
)
# Split into train and test dataset
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42
)
# Train classifier
clf.fit(X_train, y_train)
```
Let's use our model to make predictions on the whole dataset:
```
# Predict
y_pred = clf.predict(X)
print(f"Example of the first twenty predictions: {y_pred[:20]}")
```
# Quality of service harm
Let's inspect how the performance of the model (precision and recall) changes for the different __sensitive subpopulations__ defined by the __sensitive features__:
```
from fairlearn.metrics import MetricFrame
from sklearn.metrics import recall_score, precision_score
# Break precision and recall for different subpopulations
for sf in sensitive_features:
grouped_metric = MetricFrame(
{"precision": precision_score, "recall": recall_score}, y, y_pred,
        sensitive_features=loan_dataset[sf]  # use the current sensitive feature rather than hard-coding "minority"
)
grouped_metric_df = grouped_metric.by_group
display(grouped_metric_df)
print(f"Overall precision and recall:")
display(pd.DataFrame(grouped_metric.overall, columns=["Overall accuracy"]))
```
# Quality of allocation harm
Let's now inspect which values of `default` get predicted for each sensitive subpopulation. We will print the proportion that was assigned to each label:
```
for sf in sensitive_features:
print(f"Sensitive feature: {sf}")
pred_grouped = pd.DataFrame({f"{sf}": loan_dataset[sf], "y_pred": y_pred, "y_true": y})
pred_vals = pred_grouped.groupby(sf).sum().values / loan_dataset[sf].value_counts().values
pred_grouped = pd.DataFrame(pred_vals, columns=[f"{pred}_predicted", f"{pred}_true"])
display(pred_grouped)
```
|
github_jupyter
|
import pandas as pd
# Load dataset into dataframe
loan_dataset = pd.read_csv("../datasets/loan.csv")
loan_dataset.head()
# Prepare categorical and numeric features
categorical_features = ["sex", "rent", "minority", "ZIP", "occupation"]
numeric_features = [
"education", "age", "income", "loan_size", "payment_timing",
"year", "job_stability"
]
for cat in categorical_features:
loan_dataset[cat] = loan_dataset[cat].astype("object")
# Define outcome variable:
pred = "default"
# Define sensitive features:
sensitive_features = ["minority", "sex"]
# Define X and y
X = loan_dataset.copy().drop([pred], axis=1)
y = (loan_dataset.copy()[pred] != f"{pred}-no").astype(int).values
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
# Create preprocessor of features
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler())]
)
categorical_transformer = OneHotEncoder(handle_unknown='ignore')
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)
]
)
# Create pipeline
clf = Pipeline(
steps=[
('preprocessor', preprocessor),
('classifier', LogisticRegression())
]
)
# Split into train and test dataset
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42
)
# Train classifier
clf.fit(X_train, y_train)
# Predict
y_pred = clf.predict(X)
print(f"Example of the first twenty predictions: {y_pred[:20]}")
from fairlearn.metrics import MetricFrame
from sklearn.metrics import recall_score, precision_score
# Break precision and recall for different subpopulations
for sf in sensitive_features:
grouped_metric = MetricFrame(
{"precision": precision_score, "recall": recall_score}, y, y_pred,
        sensitive_features=loan_dataset[sf]  # use the current sensitive feature rather than hard-coding "minority"
)
grouped_metric_df = grouped_metric.by_group
display(grouped_metric_df)
print(f"Overall precision and recall:")
display(pd.DataFrame(grouped_metric.overall, columns=["Overall accuracy"]))
for sf in sensitive_features:
print(f"Sensitive feature: {sf}")
pred_grouped = pd.DataFrame({f"{sf}": loan_dataset[sf], "y_pred": y_pred, "y_true": y})
pred_vals = pred_grouped.groupby(sf).sum().values / loan_dataset[sf].value_counts().values
pred_grouped = pd.DataFrame(pred_vals, columns=[f"{pred}_predicted", f"{pred}_true"])
display(pred_grouped)
| 0.829492 | 0.975739 |
```
import pandas as pd
import numpy as np
import datetime as dt
# Files to load
jan2021 = "Data/2021/JC-202101-citibike-tripdata.csv"
feb2021 = "Data/2021/JC-202102-citibike-tripdata.csv"
mar2021 = "Data/2021/JC-202103-citibike-tripdata.csv"
apr2021 = "Data/2021/JC-202104-citibike-tripdata.csv"
may2021 = "Data/2021/JC-202105-citibike-tripdata.csv"
jun2021 = "Data/2021/JC-202106-citibike-tripdata.csv"
jul2021 = "Data/2021/JC-202107-citibike-tripdata.csv"
aug2021 = "Data/2021/JC-202108-citibike-tripdata.csv"
sep2021 = "Data/2021/JC-202109-citibike-tripdata.csv"
oct2021 = "Data/2021/JC-202110-citibike-tripdata.csv"
# read csv files
jan2021_df = pd.read_csv(jan2021)
feb2021_df = pd.read_csv(feb2021)
mar2021_df = pd.read_csv(mar2021)
apr2021_df = pd.read_csv(apr2021)
may2021_df = pd.read_csv(may2021)
jun2021_df = pd.read_csv(jun2021)
jul2021_df = pd.read_csv(jul2021)
aug2021_df = pd.read_csv(aug2021)
sep2021_df = pd.read_csv(sep2021)
oct2021_df = pd.read_csv(oct2021)
jan2021_df.count()
# Convert All DateTimes to "%Y-%m-%d %H:%M:%S" Format
jan2021_df["starttime"] = pd.to_datetime(jan2021_df["starttime"])
jan2021_df["stoptime"] = pd.to_datetime(jan2021_df["stoptime"])
# Dropped these columns because they were not included in the most recent data in 2021
Clean_jan2021_df = jan2021_df.drop(columns=['birth year', 'gender', 'bikeid'])
Clean_jan2021_df
# made all usertypes the same binary options because the data changed in 2021, removing some fields
# and changing the values of some existing fields
Clean_jan2021_df['usertype'].replace('Subscriber','member',inplace = True)
Clean_jan2021_df['usertype'].replace('Customer','casual',inplace = True)
Clean_jan2021_df.head()
Clean_jan2021_df.count()
feb2021_df.count()
feb2021_df
```
# The dataset changed some binary choices and removed some columns between January and February
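A quick, illustrative check (not part of the original cleaning steps) is to compare the column sets of the two months directly:
```
# Illustrative only: list the columns that appear in one month but not the other
jan_cols = set(jan2021_df.columns)
feb_cols = set(feb2021_df.columns)
print("Only in January:", sorted(jan_cols - feb_cols))
print("Only in February:", sorted(feb_cols - jan_cols))
```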
```
oct2021_df
# combine the 2021 months that share the same schema into a single dataframe
febthruoct_df = feb2021_df.append([mar2021_df, apr2021_df, may2021_df, jun2021_df, jul2021_df, aug2021_df, sep2021_df, oct2021_df], ignore_index=True)
febthruoct_df.head()
febthruoct_df.count()
# Convert All DateTimes to "%Y-%m-%d %H:%M:%S" Format
febthruoct_df["started_at"] = pd.to_datetime(febthruoct_df["started_at"])
febthruoct_df["ended_at"] = pd.to_datetime(febthruoct_df["ended_at"])
Clean_jan2021_df.count()
# calculate trip duration since it was a field that was removed.
# the result is in seconds, which matches the 2019 and 2020 data
febthruoct_df["tripduration"] = febthruoct_df["ended_at"]- febthruoct_df["started_at"]
# converted trip duration to a number of seconds for easier analysis
febthruoct_df["tripduration"] = febthruoct_df["tripduration"]/np.timedelta64(1,'s')
febthruoct_df_reorder = febthruoct_df[['ride_id','rideable_type','tripduration','started_at','ended_at','start_station_id','start_station_name','start_lat','start_lng','end_station_id','end_station_name','end_lat','end_lng','member_casual']]
febthruoct_df_reorder.count()
#renamed columns to match the other dataframes
febthruoct_df_reorder_rename = febthruoct_df_reorder.rename(columns={"started_at": "starttime",
"ended_at": "stoptime",
"start_station_id": "start station id",
"start_station_name": "start station name",
"start_lat": "start station latitude",
"start_lng": "start station longitude",
"end_station_id": "end station id",
"end_station_name": "end station name",
"end_lat": "end station latitude",
"end_lng": "end station longitude",
"member_casual": "usertype",
})
febthruoct_df_reorder_rename
# found the longest trip duration
febthruoct_df_reorder_rename['tripduration'].max()
# dropped columns that didn't match the 2019 and 2020 data
Drop_febthruoct_df_reorder_rename = febthruoct_df_reorder_rename.drop(columns=['ride_id', 'rideable_type','start station id', 'end station id'])
Drop_febthruoct_df_reorder_rename.count()
Clean_jan2021_df.count()
Drop_febthruoct_df_reorder_rename.head()
#dropped any rows that could have a null value
Clean_febthruoct_df_reorder_rename = Drop_febthruoct_df_reorder_rename.dropna()
Clean_febthruoct_df_reorder_rename.count()
#Combined clean data into one 2021 dataset for analysis
Clean_BikeData_2021_df = Clean_jan2021_df.append([Clean_febthruoct_df_reorder_rename])
Clean_BikeData_2021_df.head()
Clean_BikeData_2021_df.count()
# Export dataframe to csv for analysis
Clean_BikeData_2021_df.to_csv("Source/Clean_BikeData_2021_df.csv", index=False, header=True)
```
<a href="https://colab.research.google.com/github/Danielajanb/Linear-Algebra_2nd-Sem/blob/main/Assignment_10.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Laboratory 10 : Linear Combination and Vector Spaces
Now that you have fundamental knowledge about linear combinations, we'll try to visualize them using scientific programming.
### Objectives
At the end of this activity you will be able to:
1. Be familiar with representing linear combinations in the 2-dimensional plane.
2. Visualize spans using vector fields in Python.
3. Perform vector fields operations using scientific programming.
## Discussion
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
## Linear Combination
It is said that a linear combination is the combination of linear scaling and addition of a vector's bases/components.
We will try to visualize the vectors and their linear combinations by plotting a sample of real number values for the scalars for the vectors. Let's first try the vectors below:
$$X = \begin{bmatrix} 2\\4 \\\end{bmatrix} , Y = \begin{bmatrix} 3\\6 \\\end{bmatrix} $$
```
vectX = np.array([2,4])
vectY = np.array([3,6])
```
#### Span of single vectors
As discussed in the lecture, the span of individual vectors can be represented by a line span. Let's take vector $X$ as an example.
$$X = c\cdot \begin{bmatrix} 2\\4 \\\end{bmatrix} $$
```
c = np.arange(-11,10,0.15)
plt.scatter(c*vectX[0],c*vectX[1], color='green')
plt.xlim(-11,11)
plt.ylim(-11,11)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
```
$$Y = c\cdot \begin{bmatrix} 3\\6 \\\end{bmatrix} $$
```
c = np.arange(-20,4,0.16)
plt.scatter(c*vectY[0],c*vectY[1], color='blue')
plt.xlim(-18,18)
plt.ylim(-18,18)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
```
### Span of a linear combination of vectors
So what if we are to plot the span of a linear combination of vectors? We can visualize it as a plane on the 2-dimensional coordinate system. Let's take the span of the linear combination below:
$$S = \begin{Bmatrix} c_1 \cdot\begin{bmatrix} -1\\0 \\\end{bmatrix},
c_2 \cdot \begin{bmatrix} 1\\-1 \\\end{bmatrix}\end{Bmatrix} $$
```
vectA = np.array([-1,0])
vectB = np.array([1,-1])
R = np.arange(-16,10,0.84)
c1, c2 = np.meshgrid(R,R)
vectR = vectA + vectB
spanRx = c1*vectA[0] + c2*vectB[0]
spanRy = c1*vectA[1] + c2*vectB[1]
##plt.scatter(R*vectA[0],R*vectA[1])
##plt.scatter(R*vectB[0],R*vectB[1])
plt.scatter(spanRx,spanRy, s=5, alpha=0.75, color='magenta')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
vectP = np.array([2,4])
vectQ = np.array([3,6])
R = np.arange(-25,13,1)
c1, c2 = np.meshgrid(R,R)
vectR = vectP + vectQ
spanRx = c1*vectP[0] + c2*vectQ[0]
spanRy = c1*vectP[1] + c2*vectQ[1]
##plt.scatter(R*vectA[0],R*vectA[1])
##plt.scatter(R*vectB[0],R*vectB[1])
plt.scatter(spanRx,spanRy, s=5, alpha=0.75, color='pink')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
```
Take note that if the vectors produce a 2-dimensional span, we say they have a rank of 2 and span $\mathbb{R}^2$. But if the span of the linear combination of vectors is only a line, the vectors are said to be <b>linearly dependent</b> and have a rank of 1, spanning only $\mathbb{R}^1$.
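As an optional sanity check that is not part of the original lab, NumPy's `matrix_rank` confirms the idea: the pair $A$, $B$ used above is linearly independent and spans the plane, while $X = (2, 4)$ and $Y = (3, 6)$ from the start of the notebook are collinear.
```
# Optional check: the rank of the matrix of vectors reveals linear dependence
print(np.linalg.matrix_rank(np.array([[-1, 0], [1, -1]])))  # 2 -> vectA and vectB span the plane
print(np.linalg.matrix_rank(np.array([[2, 4], [3, 6]])))    # 1 -> vectX and vectY lie on a single line
```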
### Task 1
Try different linear combinations using different scalar values. In your methodology, discuss the different functions that you have used, the linear equation and vector form of the linear combination, and the flowchart for declaring and displaying linear combinations. Please make sure that your flowchart uses only a few words and does not reproduce the entire code, as that is bad practice. In your results, display and discuss the linear combination visualization you made. You should use the cells below for displaying the equation markdowns using LaTeX and your code.
$$
Z= 5x \\ M= 2x + 4y\\
$$
$$ Z = \begin{bmatrix} 5\end{bmatrix} , M = \begin{bmatrix} 2 \\ 4\end{bmatrix}
$$
$$ Z = \begin{bmatrix} 5\end{bmatrix} , M = \begin{bmatrix} 2 & 4\end{bmatrix}
$$
```
vectZ = np.array([4,8])
vectM = np.array([9,-36])
R = np.arange(-18,7,1)
c1, c2 = np.meshgrid(R,R)
vectR = vectZ + vectM
spanRx = c1*vectZ[0] + c2*vectM[0]
spanRy = c1*vectZ[1] + c2*vectM[1]
##plt.scatter(R*vectA[0],R*vectA[1])
##plt.scatter(R*vectB[0],R*vectB[1])
plt.scatter(spanRx,spanRy, s=5, alpha=0.75, color='purple')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
```
For your conclusion, synthesize the concept and application of the laboratory. Briefly discuss what you have learned and achieved in this activity. At the end of your conclusion, try to answer the question: "How can you apply the concept of linear combination in engineering or real-life situations?".
Linear functions combine statistics and problem solving. They apply in real-life situations such as computing your monthly income and monthly savings. If toy sales increase linearly month after month, you can use the formula for linear regression, and you can also make graphs that make the trend easier to understand. Distance and rate problems, pricing problems, calculating dimensions, and combining varying percentages of solutions are all instances of real-life linear equations, for example calculating the cost of a blouse priced at $100 and marked down by 50% before the sale.
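As a small illustrative addition (not part of the original activity), the blouse example can be written as the linear equation $y = 0.5x$:
```
# Illustrative only: sale price as a linear function of the original price
original_price = 100      # blouse priced at $100
markdown_rate = 0.50      # marked down by 50%
sale_price = (1 - markdown_rate) * original_price
print(sale_price)         # 50.0
```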
# pomegranate / sklearn Naive Bayes comparison
authors: <br>
Nicholas Farn (nicholasfarn@gmail.com) <br>
Jacob Schreiber (jmschreiber91@gmail.com)
<a href="https://github.com/scikit-learn/scikit-learn">sklearn</a> is a very popular machine learning package for Python which implements a wide variety of classical machine learning algorithms. In this notebook we benchmark the Naive Bayes implementation in pomegranate and compare it to the implementation in sklearn.
```
%pylab inline
import seaborn, time
seaborn.set_style('whitegrid')
from sklearn.naive_bayes import GaussianNB
from pomegranate import *
```
Let's first define a function which will create a dataset to train on. We want to be able to test a range of datasets, from very small to very large, to see which implementation is faster. We also want a function which will take in the models and evaluate them. Let's define both of those now.
```
def create_dataset(n_samples, n_dim, n_classes):
"""Create a random dataset with n_samples in each class."""
X = numpy.concatenate([numpy.random.randn(n_samples, n_dim) + i for i in range(n_classes)])
y = numpy.concatenate([numpy.zeros(n_samples) + i for i in range(n_classes)])
return X, y
def plot(fit, predict, skl_error, pom_error, sizes, xlabel):
"""Plot the results."""
idx = numpy.arange(fit.shape[1])
plt.figure(figsize=(14, 4))
plt.plot(fit.mean(axis=0), c='c', label="Fitting")
plt.plot(predict.mean(axis=0), c='m', label="Prediction")
plt.plot([0, fit.shape[1]], [1, 1], c='k', label="Baseline")
plt.fill_between(idx, fit.min(axis=0), fit.max(axis=0), color='c', alpha=0.3)
plt.fill_between(idx, predict.min(axis=0), predict.max(axis=0), color='m', alpha=0.3)
plt.xticks(idx, sizes, rotation=65, fontsize=14)
plt.xlabel('{}'.format(xlabel), fontsize=14)
plt.ylabel('pomegranate is x times faster', fontsize=14)
plt.legend(fontsize=12, loc=4)
plt.show()
plt.figure(figsize=(14, 4))
plt.plot(1 - skl_error.mean(axis=0), alpha=0.5, c='c', label="sklearn accuracy")
plt.plot(1 - pom_error.mean(axis=0), alpha=0.5, c='m', label="pomegranate accuracy")
plt.fill_between(idx, 1-skl_error.min(axis=0), 1-skl_error.max(axis=0), color='c', alpha=0.3)
plt.fill_between(idx, 1-pom_error.min(axis=0), 1-pom_error.max(axis=0), color='m', alpha=0.3)
plt.xticks(idx, sizes, rotation=65, fontsize=14)
plt.xlabel('{}'.format(xlabel), fontsize=14)
plt.ylabel('Accuracy', fontsize=14)
plt.legend(fontsize=14)
plt.show()
```
Let's look first at single-dimension Gaussian datasets. We'll look at how many times faster pomegranate is, meaning that values > 1 show pomegranate is faster and values < 1 show pomegranate is slower. Let's also look at the accuracy of both algorithms; they should be identical since they implement the same algorithm.
```
sizes = numpy.around(numpy.exp(numpy.arange(8, 16))).astype('int')
n, m = sizes.shape[0], 20
skl_predict, pom_predict = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_fit, pom_fit = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))
for i in range(m):
for j, size in enumerate(sizes):
X, y = create_dataset(size, 1, 2)
# bench fit times
tic = time.time()
skl = GaussianNB()
skl.fit(X, y)
skl_fit[i, j] = time.time() - tic
tic = time.time()
pom = NaiveBayes.from_samples(NormalDistribution, X, y)
pom_fit[i, j] = time.time() - tic
# bench predict times
tic = time.time()
skl_predictions = skl.predict(X)
skl_predict[i, j] = time.time() - tic
tic = time.time()
pom_predictions = pom.predict(X)
pom_predict[i, j] = time.time() - tic
# check number wrong
skl_e = (y != skl_predictions).mean()
pom_e = (y != pom_predictions).mean()
skl_error[i, j] = min(skl_e, 1-skl_e)
pom_error[i, j] = min(pom_e, 1-pom_e)
fit = skl_fit / pom_fit
predict = skl_predict / pom_predict
plot(fit, predict, skl_error, pom_error, sizes, "samples per component")
```
It looks as if pomegranate is approximately the same speed for training small models, but prediction can be a lot faster in pomegranate than in sklearn.
Now let's take a look at how speeds change as we increase the number of classes that need to be predicted, rather than framing all of the comparisons as binary classification.
```
sizes = numpy.arange(2, 21).astype('int')
n, m = sizes.shape[0], 20
skl_predict, pom_predict = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_fit, pom_fit = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))
for i in range(m):
for j, size in enumerate(sizes):
X, y = create_dataset(50000 // size, 1, size)
# bench fit times
tic = time.time()
skl = GaussianNB()
skl.fit(X, y)
skl_fit[i, j] = time.time() - tic
tic = time.time()
pom = NaiveBayes.from_samples(NormalDistribution, X, y)
pom_fit[i, j] = time.time() - tic
# bench predict times
tic = time.time()
skl_predictions = skl.predict(X)
skl_predict[i, j] = time.time() - tic
tic = time.time()
pom_predictions = pom.predict(X)
pom_predict[i, j] = time.time() - tic
# check number wrong
skl_e = (y != skl_predictions).mean()
pom_e = (y != pom_predictions).mean()
skl_error[i, j] = min(skl_e, 1-skl_e)
pom_error[i, j] = min(pom_e, 1-pom_e)
fit = skl_fit / pom_fit
predict = skl_predict / pom_predict
plot(fit, predict, skl_error, pom_error, sizes, "number of classes")
```
It looks like, again, pomegranate is around the same speed as sklearn for fitting models, but it is consistently much faster at making predictions.
```
X, y = create_dataset(50000, 1, 2)
skl = GaussianNB()
skl.fit(X, y)
pom = NaiveBayes.from_samples(NormalDistribution, X, y)
%timeit skl.predict(X)
%timeit pom.predict(X)
```
This does show that pomegranate is faster at making predictions, but both are so fast that in practice it may not matter much.
While it's good to start off by looking at naive Bayes models defined on single features, the more common setting is one where you have many features. Let's take a look at the relative speeds on a larger number of examples when there are 5 features rather than a single one.
```
sizes = numpy.around(numpy.exp(numpy.arange(8, 16))).astype('int')
n, m = sizes.shape[0], 20
skl_predict, pom_predict = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_fit, pom_fit = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))
for i in range(m):
for j, size in enumerate(sizes):
X, y = create_dataset(size, 5, 2)
# bench fit times
tic = time.time()
skl = GaussianNB()
skl.fit(X, y)
skl_fit[i, j] = time.time() - tic
tic = time.time()
pom = NaiveBayes.from_samples(NormalDistribution, X, y)
pom_fit[i, j] = time.time() - tic
# bench predict times
tic = time.time()
skl_predictions = skl.predict(X)
skl_predict[i, j] = time.time() - tic
tic = time.time()
pom_predictions = pom.predict(X)
pom_predict[i, j] = time.time() - tic
# check number wrong
skl_e = (y != skl_predictions).mean()
pom_e = (y != pom_predictions).mean()
skl_error[i, j] = min(skl_e, 1-skl_e)
pom_error[i, j] = min(pom_e, 1-pom_e)
fit = skl_fit / pom_fit
predict = skl_predict / pom_predict
plot(fit, predict, skl_error, pom_error, sizes, "samples per component")
```
It looks like pomegranate can be around twice as fast as sklearn at fitting multivariate Gaussian Naive Bayes models when there is more than one feature.
Finally, let's look at an increasing number of dimensions with a fixed set of two classes and 50,000 samples per class.
```
sizes = numpy.arange(5, 101, 5).astype('int')
n, m = sizes.shape[0], 20
skl_predict, pom_predict = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_fit, pom_fit = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))
for i in range(m):
for j, size in enumerate(sizes):
X, y = create_dataset(50000, size, 2)
# bench fit times
tic = time.time()
skl = GaussianNB()
skl.fit(X, y)
skl_fit[i, j] = time.time() - tic
tic = time.time()
pom = NaiveBayes.from_samples(NormalDistribution, X, y)
pom_fit[i, j] = time.time() - tic
# bench predict times
tic = time.time()
skl_predictions = skl.predict(X)
skl_predict[i, j] = time.time() - tic
tic = time.time()
pom_predictions = pom.predict(X)
pom_predict[i, j] = time.time() - tic
# check number wrong
skl_e = (y != skl_predictions).mean()
pom_e = (y != pom_predictions).mean()
skl_error[i, j] = min(skl_e, 1-skl_e)
pom_error[i, j] = min(pom_e, 1-pom_e)
fit = skl_fit / pom_fit
predict = skl_predict / pom_predict
plot(fit, predict, skl_error, pom_error, sizes, "dimensions")
```
It looks like pomegranate is consistently faster than sklearn at fitting the model but converges to approximately the same speed at making predictions in the high-dimensional setting. Their accuracies remain identical, indicating that the two are learning the same model.
## Out of Core Training
Lastly, both pomegranate and sklearn allow for out of core training by fitting on chunks of a dataset. pomegranate does this by calculating summary statistics on the dataset which are enough to allow for exact parameter updates to be done. sklearn implements this using the `model.partial_fit(X, y)` API call, whereas pomegranate uses `model.summarize(X, y)` followed by `model.from_summaries()` to update the internal parameters.
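Before the timing comparison, here is a minimal sketch of just those two call patterns, reusing the `create_dataset` helper and the same model setup as the benchmark below:
```
# Minimal sketch of the two out-of-core APIs (illustrative only; mirrors the benchmark below)
skl_oc = GaussianNB()
pom_oc = NaiveBayes([IndependentComponentsDistribution([NormalDistribution(0, 1) for i in range(5)]),
                     IndependentComponentsDistribution([NormalDistribution(0, 1) for i in range(5)])])

for batch in range(3):
    X_batch, y_batch = create_dataset(1000, 5, 2)
    skl_oc.partial_fit(X_batch, y_batch, classes=[0, 1])  # sklearn: update parameters on each chunk
    pom_oc.summarize(X_batch, y_batch)                    # pomegranate: collect sufficient statistics
    pom_oc.from_summaries()                               # pomegranate: exact update from those statistics
```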
Let's compare how long each method takes to train on several batches of increasing sizes, and the accuracy of both methods.
```
sizes = numpy.around( numpy.exp( numpy.arange(8, 16) ) ).astype('int')
n, m = sizes.shape[0], 20
skl_time, pom_time = numpy.zeros((m, n)), numpy.zeros((m, n))
skl_error, pom_error = numpy.zeros((m, n)), numpy.zeros((m, n))
for i in range(m):
for j, size in enumerate(sizes):
skl = GaussianNB()
pom = NaiveBayes([IndependentComponentsDistribution([NormalDistribution(0, 1) for i in range(5)]),
IndependentComponentsDistribution([NormalDistribution(0, 1) for i in range(5)])])
for l in range(5):
X, y = create_dataset(size, 5, 2)
tic = time.time()
skl.partial_fit(X, y, classes=[0, 1])
skl_time[i, j] += time.time() - tic
tic = time.time()
pom.summarize( X, y )
pom_time[i, j] += time.time() - tic
tic = time.time()
pom.from_summaries()
pom_time[i, j] += time.time() - tic
skl_predictions = skl.predict( X )
pom_predictions = pom.predict( X )
skl_error[i, j] = ( y != skl_predictions ).mean()
pom_error[i, j] = ( y != pom_predictions ).mean()
fit = skl_time / pom_time
idx = numpy.arange(fit.shape[1])
plt.figure( figsize=(14, 4))
plt.plot( fit.mean(axis=0), c='c', label="Fitting")
plt.plot( [0, fit.shape[1]], [1, 1], c='k', label="Baseline" )
plt.fill_between( idx, fit.min(axis=0), fit.max(axis=0), color='c', alpha=0.3 )
plt.xticks(idx, sizes, rotation=65, fontsize=14)
plt.xlabel('Batch Size', fontsize=14)
plt.ylabel('pomegranate is x times faster', fontsize=14)
plt.legend(fontsize=12, loc=4)
plt.show()
plt.figure( figsize=(14, 4))
plt.plot( 1 - skl_error.mean(axis=0), alpha=0.5, c='c', label="sklearn accuracy" )
plt.plot( 1 - pom_error.mean(axis=0), alpha=0.5, c='m', label="pomegranate accuracy" )
plt.fill_between( idx, 1-skl_error.min(axis=0), 1-skl_error.max(axis=0), color='c', alpha=0.3 )
plt.fill_between( idx, 1-pom_error.min(axis=0), 1-pom_error.max(axis=0), color='m', alpha=0.3 )
plt.xticks( idx, sizes, rotation=65, fontsize=14)
plt.xlabel('Batch Size', fontsize=14)
plt.ylabel('Accuracy', fontsize=14)
plt.legend(fontsize=14)
plt.show()
```
pomegranate seems to be much faster at doing out-of-core training. The out of core API of calculating sufficient statistics using `summarize` and then updating the model parameters using `from_summaries` extends to all models in pomegranate.
In this notebook we compared an intersection of the features that pomegranate and sklearn offer. pomegranate allows you to use Naive Bayes with any distribution or model object which has an exposed `log_probability` and `fit` method. This allows you to do things such as compare hidden Markov models to each other, or compare a hidden Markov model to a Markov Chain to see which one models the data better.
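For instance, here is a hypothetical sketch of that idea, using per-class mixture models rather than the hidden Markov models mentioned above to keep it short. It assumes a `GeneralMixtureModel.from_samples(distribution, n_components, X)` signature for the pomegranate version used here and is not taken from the original notebook:
```
# Hypothetical sketch: any pre-built model exposing log_probability() and fit() can be a class-conditional component
X, y = create_dataset(5000, 1, 2)
gmm_0 = GeneralMixtureModel.from_samples(NormalDistribution, 2, X[y == 0])  # assumed signature
gmm_1 = GeneralMixtureModel.from_samples(NormalDistribution, 2, X[y == 1])
clf = NaiveBayes([gmm_0, gmm_1])
clf.fit(X, y)  # refits each component on its class and learns the class priors
print((clf.predict(X) == y).mean())
```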
We hope this has been useful to you! If you're interested in using pomegranate, you can get it using `pip install pomegranate` or by checking out the <a href="https://github.com/jmschrei/pomegranate">github repo.</a>
# Features Scores
```
# Library
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
import math
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
import fix_yahoo_finance as yf
yf.pdr_override()
stock_name = 'AMD'
start = '2018-01-01'
end = '2018-09-14'
df = yf.download(stock_name, start, end)
df = df.reset_index()
df.head()
df.columns
df.columns = ['Adj_Close' if x=='Adj Close' else x for x in df.columns]
df.head()
```
# Simple Single Linear Regression
```
# Use one feature
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.sandbox.regression.predstd import wls_prediction_std
stock_model = ols("Adj_Close ~ Open", data=df).fit()
stock_model.summary()
fig = plt.figure(figsize=(15,8))
fig = sm.graphics.plot_regress_exog(stock_model, "Open", fig=fig)
# predictor variable (X) and dependent variable (y)
X = df[['Open']]
y = df[['Adj_Close']]
plt.figure(figsize=(12,8))
plt.plot(X, y, 'ro')
plt.show()
# Plot Stock Charts
_, confidence_interval_lower, confidence_interval_upper = wls_prediction_std(stock_model)
fig, ax = plt.subplots(figsize=(10,7))
ax.plot(X, y, 'bo', label="data")
# Plot trend line
ax.plot(X, stock_model.fittedvalues, 'g--.', label="OLS")
# Plot upper and lower confidence interval
ax.plot(X, confidence_interval_upper, 'r--')
ax.plot(X, confidence_interval_lower, 'r--')
# Plot title, grid, and legend
ax.set_title('Simple Linear Regression')
ax.grid()
ax.legend(loc='best')
model = LinearRegression()
model.fit(X, y)
a = model.coef_ * X + model.intercept_
a
plt.figure(figsize=(12,8))
plt.plot(X, y, 'ro', X, a)
axes = plt.gca()
# axes.set_ylim([0, 30])
plt.show()
```
# Multiple Linear Regression
```
# Multi Features
stock_models = ols("Adj_Close ~ Open + High + Low + Volume", data=df).fit()
stock_models.summary()
fig = plt.figure(figsize=(20,12))
fig = sm.graphics.plot_partregress_grid(stock_models, fig=fig)
# 2nd order polynomial
from sklearn.preprocessing import PolynomialFeatures
X = df[['High', 'Low']]
y = df[['Adj_Close']]
poly_2 = PolynomialFeatures(degree=2)
X = poly_2.fit_transform(X)
model2 = LinearRegression()
model2.fit(X, y)
model2.coef_
model2.score(X, y)
# 2nd order polynomial
X = df[['Open', 'Low']]
y = df[['Adj_Close']]
poly_2 = PolynomialFeatures(degree=2)
X = poly_2.fit_transform(X)
model2 = LinearRegression()
model2.fit(X, y)
model2.coef_
model2.score(X, y)
# 2nd order polynomial
X = df[['Open', 'High']]
y = df[['Adj_Close']]
poly_2 = PolynomialFeatures(degree=2)
X = poly_2.fit_transform(X)
model2 = LinearRegression()
model2.fit(X, y)
model2.coef_
model2.score(X, y)
from sklearn.preprocessing import PolynomialFeatures
X = df[['Open', 'High']].values
y = df[['Adj_Close']].values
X = np.array(X)
y = np.array(y)
poly = PolynomialFeatures(degree=2)
poly_features = poly.fit_transform(X)
poly.fit(X,y)
poly_regression = LinearRegression()
poly_regression.fit(poly_features,y)
print(X.shape)
print(y.shape)
X = X[:,:-1]
X.shape
# Slicing with [:, :-1] will give you a 2-dimensional array (including all rows and all columns excluding the last column).
# Slicing with [:, 1] will give you a 1-dimensional array (including all rows from the second column).
# To make this array also 2-dimensional use [:, 1:2] or [:, 1].reshape(-1, 1) or [:, 1][:, None] instead of [:, 1]. This will make x and y comparable.
# An alternative to making both arrays 2-dimensional is making them both one dimensional.
# For this one would do [:, 0] (instead of [:, :1]) for selecting the first column and [:, 1] for selecting the second column.
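# Illustrative check of the shapes described above, using a small throwaway array
_demo = np.arange(6).reshape(3, 2)
print(_demo[:, :-1].shape)   # (3, 1) -> still 2-dimensional
print(_demo[:, 1].shape)     # (3,)   -> 1-dimensional
print(_demo[:, 1:2].shape)   # (3, 1) -> 2-dimensional again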
# Plotting the data for Polynomial Regression
regressor=LinearRegression()
regressor.fit(X,y)
plt.scatter(X,y, color='red')
plt.plot(X,poly_regression.predict(poly_features))
plt.title("Polynomial Regression with degree 2")
plt.xlabel("Stock Features")
plt.ylabel("Stock Prices")
plt.show()
# Plotting the Linear Regression
plt.scatter(X,y, color='red')
plt.plot(X,regressor.predict(X))
plt.title("Linear Regression")
plt.xlabel("Stock Features")
plt.ylabel("Stock Prices")
plt.show()
# 3rd order polynomial
X = df[['Open', 'High', 'Low']]
y = df[['Adj_Close']]
poly_3 = PolynomialFeatures(degree=3)
X = poly_3.fit_transform(X)
model3 = LinearRegression()
model3.fit(X, y)
model3.coef_
model3.score(X, y)
X = df[['Open', 'High', 'Low']].values
y = df[['Adj_Close']].values
poly = PolynomialFeatures(degree=3)
poly_features = poly.fit_transform(X)
poly.fit(X,y)
poly_regression = LinearRegression()
poly_regression.fit(poly_features,y)
X = X[:,:-2]
X.shape
# Plotting the data for Polynomial Regression
plt.scatter(X,y, color='red')
plt.plot(X,poly_regression.predict(poly_features))
plt.title("Polynomial Regression with degree 3")
plt.xlabel("Stock Features")
plt.ylabel("Stock Prices")
plt.show()
# Plotting the Linear Regression
plt.scatter(X,y, color='red')
plt.plot(X,regressor.predict(X))
plt.title("Linear Regression")
plt.xlabel("Stock Features")
plt.ylabel("Stock Prices")
plt.show()
# 4th order polynomial
X = df[['Open', 'High', 'Low', 'Volume']]
y = df[['Adj_Close']]
poly_4 = PolynomialFeatures(degree=4)
X = poly_4.fit_transform(X)
model4 = LinearRegression()
model4.fit(X, y)
model4.coef_
model4.score(X, y)
X = df[['Open', 'High', 'Low', 'Volume']].values
y = df[['Adj_Close']].values
poly = PolynomialFeatures(degree=4)
poly_features = poly.fit_transform(X)
poly.fit(X,y)
poly_regression = LinearRegression()
poly_regression.fit(poly_features,y)
X = X[:,:-3]
X.shape
# Plotting the data for Polynomial Regression
plt.scatter(X,y, color='red')
plt.plot(X,poly_regression.predict(poly_features))
plt.title("Polynomial Regression with degree 4")
plt.xlabel("Stock Features")
plt.ylabel("Stock Prices")
plt.show()
# Plotting the Linear Regression
plt.scatter(X,y, color='red')
plt.plot(X,regressor.predict(X))
plt.title("Linear Regression")
plt.xlabel("Stock Features")
plt.ylabel("Stock Prices")
plt.show()
```
```
import cv2
import os
def load_images_from_folder(folder):
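    # Read every image in the folder and convert it from OpenCV's BGR ordering to RGB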
images = []
for filename in os.listdir(folder):
img1 = cv2.imread(os.path.join(folder,filename))
img = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
if img is not None:
images.append(img)
return images
im_daisy = load_images_from_folder('data/flower_photos/daisy')
im_tulips = load_images_from_folder('data/flower_photos/tulips')
import matplotlib.pyplot as plt
import numpy as np
% matplotlib inline
print(len(im_daisy))
samples_per_row = 5
num_rows = 2
# Visualize some of the images of our dataset
fig, axis = plt.subplots(num_rows,samples_per_row,figsize=(15,5))
#fig.axis('off')
n_train = len(im_daisy)
for row in axis:
for col in row:
idx = np.random.randint(0, n_train)
col.imshow(im_daisy[idx])
col.axis('off')
print(len(im_tulips))
samples_per_row = 5
num_rows = 2
# Visualize some of the images of our dataset
fig, axis = plt.subplots(num_rows,samples_per_row,figsize=(15,5))
#fig.axis('off')
n_train = len(im_tulips)
for row in axis:
for col in row:
idx = np.random.randint(0, n_train)
col.imshow(im_tulips[idx])
col.axis('off')
import os
SUBMIT_ARGS = "--packages databricks:spark-deep-learning:1.3.0-spark2.4-s_2.11 pyspark-shell"
os.environ["PYSPARK_SUBMIT_ARGS"] = SUBMIT_ARGS
from pyspark.sql import SparkSession
spark = SparkSession.builder \
.appName("ImageClassification") \
.config("spark.executor.memory", "70g") \
.config("spark.driver.memory", "50g") \
.config("spark.memory.offHeap.enabled",True) \
.config("spark.memory.offHeap.size","16g") \
.getOrCreate()
import pyspark.sql.functions as f
import sparkdl as dl
from pyspark.ml.image import ImageSchema
from sparkdl.image import imageIO
dftulips = ImageSchema.readImages('data/flower_photos/tulips').withColumn('label', f.lit(0))
dfdaisy = ImageSchema.readImages('data/flower_photos/daisy').withColumn('label', f.lit(1))
dfdaisy.show(5)
dftulips.show(5)
trainDFdaisy, testDFdaisy = dfdaisy.randomSplit([0.70,0.30], seed = 123)
trainDFtulips, testDFtulips = dftulips.randomSplit([0.70,0.30], seed = 122)
trainDF = trainDFdaisy.unionAll(trainDFtulips)
testDF = testDFdaisy.unionAll(testDFtulips)
#trainDF = trainDF.repartition(100) #required when dataset is large
#testDF = testDF.repartition(100)
from pyspark.ml.classification import LogisticRegression
from pyspark.ml import Pipeline
vectorizer = dl.DeepImageFeaturizer(inputCol="image", outputCol="features",
modelName="InceptionV3")
logreg = LogisticRegression(maxIter=20, labelCol="label")
pipeline = Pipeline(stages=[vectorizer, logreg])
pipeline_model = pipeline.fit(trainDF)
predictDF = pipeline_model.transform(testDF) #predict on test dataset
predictDF.select('prediction', 'label').show(n = testDF.toPandas().shape[0], truncate=False)
predictDF.crosstab('prediction', 'label').show()
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
scoring = predictDF.select("prediction", "label")
accuracy_score = MulticlassClassificationEvaluator(metricName="accuracy")
rate = accuracy_score.evaluate(scoring)*100
print("accuracy: {}%" .format(round(rate,2)))
```
```
import os
os.chdir('../../..')
import numpy as np
import matplotlib.pyplot as plt
from ai import cs
import torch
from databases.joint_sets import MuPoTSJoints
from databases.datasets import PersonStackedMuPoTsDataset, Mpi3dTestDataset
from util.misc import load
from util.viz import *
from util.pose import remove_root
from training.torch_tools import *
from training.preprocess import get_postprocessor, SaveableCompose, MeanNormalize3D
from training.loaders import UnchunkedGenerator
from scripts.eval import load_model
def joint2bone(nd):
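    # Bone vectors: coordinate differences between each pair of connected joints (LIMBGRAPH order)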
cj = get_cjs()
return nd[:, cj[:, 0], :] - nd[:, cj[:, 1], :]
def bone2joint(pred_bx, pred_by, pred_bz, root):
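    # Rebuild the 17 3D joint positions by accumulating bone vectors outward from the root joint (index 14)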
cj = get_cjs()
cj_index = [2, 1, 0, 5, 4, 3, 9, 8, 12, 11, 10, 15, 14, 13, 7, 6]
ordered_cj = cj[cj_index, :]
pred_bxyz = np.stack((pred_bx, pred_by, pred_bz), axis=-1)
res = np.zeros((root.shape[0], 17, 3))
res[:, 14, :] = root
for (a, b), i in zip(ordered_cj, cj_index):
res[:, a, :] = res[:, b, :] + pred_bxyz[:, i, :]
return res
def get_cjs():
connected_joints = MuPoTSJoints().LIMBGRAPH
return np.array(connected_joints)
def get_rtp(nd):
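    # Bone vectors expressed in spherical coordinates: r = bone length, t and p = angles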
diff = joint2bone(nd)
r, t, p = cs.cart2sp(diff[:, :, 0], diff[:, :, 1], diff[:, :, 2])
return r, t, p
def get_lengths(nd):
r, _, _ = get_rtp(nd)
return r
def get_xyz(r, t, p, root):
pred_bx, pred_by, pred_bz = cs.sp2cart(r, t, p)
return bone2joint(pred_bx, pred_by, pred_bz, root)
model_dir = '../models/4b1006aa968a47139217c9e7ac31e52f/'
config, model = load_model(model_dir)
def get_dataset(config):
data = PersonStackedMuPoTsDataset(
config["pose2d_type"],
config.get("pose3d_scaling", "normal"),
pose_validity="all",
)
# data = Mpi3dTestDataset(
# config["pose2d_type"],
# config.get("pose3d_scaling", "normal"),
# eval_frames_only=True,
# )
return data
dataset = get_dataset(config)
params_path = f"{model_dir}/preprocess_params.pkl"
transform = SaveableCompose.from_file(params_path, dataset, globals())
dataset.transform = transform
assert isinstance(transform.transforms[1].normalizer, MeanNormalize3D)
normalizer3d = transform.transforms[1].normalizer
post_process_func = get_postprocessor(config, dataset, normalizer3d)
augment = True
pad = (model.receptive_field() - 1) // 2
generator = UnchunkedGenerator(dataset, pad, augment)
seqs = sorted(np.unique(dataset.index.seq))
data_3d_mm = {}
preprocessed3d = {}
for seq in seqs:
inds = np.where(dataset.index.seq == seq)[0]
batch = dataset.get_samples(inds, False)
preprocessed3d[seq] = batch["pose3d"][batch["valid_pose"]]
data_3d_mm[seq] = dataset.poses3d[inds][batch["valid_pose"]]
# break
bl = {}
root = {}
org_pose3d = {}
for seq in seqs:
inds = np.where(dataset.index.seq == seq)[0]
batch = dataset.get_samples(inds, False)
bl[seq] = batch["length"][batch["valid_pose"]]
root[seq] = batch["root"][batch["valid_pose"]]
org_pose3d[seq] = batch["org_pose3d"][batch["valid_pose"]]
# break
_dgt = {}
seqs = sorted(np.unique(dataset.index.seq))
for seq in seqs:
inds = np.where(dataset.index.seq == seq)[0]
batch = dataset.get_samples(inds, False)
mgt = dataset.poses3d[inds][batch["valid_pose"]]
_dgt[seq] = mgt
_dpred = {}
raw_preds = {}
losses = {}
with torch.no_grad():
for i, (pose2d, valid) in enumerate(generator):
seq = seqs[i]
pred3d = (
model(torch.from_numpy(pose2d).cuda()).detach().cpu().numpy()
)
raw_preds[seq] = pred3d.copy() # .cpu().numpy()
valid = valid[0]
# pred_bo_np = pred3d[0][valid].reshape([-1, 2, 16])
# if orient_norm is None:
# pass
# elif orient_norm == "_1_1":
# pred_bo_np *= np.pi
# elif orient_norm == "0_1":
# pred_bo_np = (pred_bo_np * 2 * np.pi) - np.pi
# else:
# raise Exception(
# f"Not supported oreitation norm: {self.orient_norm}"
# )
# pred_bo = torch.from_numpy(pred_bo_np).to("cuda")
# orient_pred3d = (
# orient2pose(
# pred_bo,
# # torch.from_numpy(self.bo[seq]).to("cuda"),
# torch.from_numpy(bl[seq]).to("cuda"),
# torch.from_numpy(root[seq]).to("cuda"),
# )
# .cpu()
# .numpy()
# )
# preds[seq] = orient_pred3d
pred_real_pose = post_process_func(pred3d[0], seq)
if augment:
pred_real_pose_aug = post_process_func(pred3d[1], seq)
pred_real_pose_aug[:, :, 0] *= -1
pred_real_pose_aug = dataset.pose3d_jointset.flip(
pred_real_pose_aug
)
pred_real_pose = (pred_real_pose + pred_real_pose_aug) / 2
mpred = pred_real_pose[valid]
_dpred[seq] = mpred
_ = eval_results(_dgt, _dpred, MuPoTSJoints())
dgt = {}
dpred = {}
for seq in seqs:
mgt = _dgt[seq]
mpred = _dpred[seq]
gt_r = get_lengths(mgt)
r, t, p = get_rtp(mpred)
mpred = get_xyz(gt_r, t, p, mgt[:, 14, :])
dgt[seq] = mgt
dpred[seq] = mpred
_ = eval_results(dgt, dpred, MuPoTSJoints())
dgt = {}
dpred = {}
for seq in seqs:
mgt = _dgt[seq]
mpred = _dpred[seq]
gt_r = get_lengths(mgt)
diff = joint2bone(mpred)
dx = diff[:, :, 0]
dy = diff[:, :, 1]
dz = diff[:, :, 2]
adj_dz = np.sign(diff[:, :, 2]) * np.sqrt(np.maximum((gt_r**2) - (dx**2) - (dy**2), 0))
mpred = bone2joint(dx, dy, adj_dz, mgt[:, 14, :])
dgt[seq] = mgt
dpred[seq] = mpred
_ = eval_results(dgt, dpred, MuPoTSJoints())
for i in range(3):
i *= 10
show3Dpose(np.array([mgt[i, ], mpred[i, ]]), MuPoTSJoints(), invert_vertical=True)
plt.show()
print(eval_results({'0': mgt[i:i+1, ]}, {'0': mpred[i:i+1]}, MuPoTSJoints(), verbose=False)[0]['0'])
```
|
github_jupyter
|
import os
os.chdir('../../..')
import numpy as np
import matplotlib.pyplot as plt
from ai import cs
import torch
from databases.joint_sets import MuPoTSJoints
from databases.datasets import PersonStackedMuPoTsDataset, Mpi3dTestDataset
from util.misc import load
from util.viz import *
from util.pose import remove_root
from training.torch_tools import *
from training.preprocess import get_postprocessor, SaveableCompose, MeanNormalize3D
from training.loaders import UnchunkedGenerator
from scripts.eval import load_model
def joint2bone(nd):
cj = get_cjs()
return nd[:, cj[:, 0], :] - nd[:, cj[:, 1], :]
def bone2joint(pred_bx, pred_by, pred_bz, root):
cj = get_cjs()
cj_index = [2, 1, 0, 5, 4, 3, 9, 8, 12, 11, 10, 15, 14, 13, 7, 6]
ordered_cj = cj[cj_index, :]
pred_bxyz = np.stack((pred_bx, pred_by, pred_bz), axis=-1)
res = np.zeros((root.shape[0], 17, 3))
res[:, 14, :] = root
for (a, b), i in zip(ordered_cj, cj_index):
res[:, a, :] = res[:, b, :] + pred_bxyz[:, i, :]
return res
def get_cjs():
connected_joints = MuPoTSJoints().LIMBGRAPH
return np.array(connected_joints)
def get_rtp(nd):
diff = joint2bone(nd)
r, t, p = cs.cart2sp(diff[:, :, 0], diff[:, :, 1], diff[:, :, 2])
return r, t, p
def get_lengths(nd):
r, _, _ = get_rtp(nd)
return r
def get_xyz(r, t, p, root):
pred_bx, pred_by, pred_bz = cs.sp2cart(r, t, p)
return bone2joint(pred_bx, pred_by, pred_bz, root)
model_dir = '../models/4b1006aa968a47139217c9e7ac31e52f/'
config, model = load_model(model_dir)
def get_dataset(config):
data = PersonStackedMuPoTsDataset(
config["pose2d_type"],
config.get("pose3d_scaling", "normal"),
pose_validity="all",
)
# data = Mpi3dTestDataset(
# config["pose2d_type"],
# config.get("pose3d_scaling", "normal"),
# eval_frames_only=True,
# )
return data
dataset = get_dataset(config)
params_path = f"{model_dir}/preprocess_params.pkl"
transform = SaveableCompose.from_file(params_path, dataset, globals())
dataset.transform = transform
assert isinstance(transform.transforms[1].normalizer, MeanNormalize3D)
normalizer3d = transform.transforms[1].normalizer
post_process_func = get_postprocessor(config, dataset, normalizer3d)
augment = True
pad = (model.receptive_field() - 1) // 2
generator = UnchunkedGenerator(dataset, pad, augment)
seqs = sorted(np.unique(dataset.index.seq))
data_3d_mm = {}
preprocessed3d = {}
for seq in seqs:
inds = np.where(dataset.index.seq == seq)[0]
batch = dataset.get_samples(inds, False)
preprocessed3d[seq] = batch["pose3d"][batch["valid_pose"]]
data_3d_mm[seq] = dataset.poses3d[inds][batch["valid_pose"]]
# break
bl = {}
root = {}
org_pose3d = {}
for seq in seqs:
inds = np.where(dataset.index.seq == seq)[0]
batch = dataset.get_samples(inds, False)
bl[seq] = batch["length"][batch["valid_pose"]]
root[seq] = batch["root"][batch["valid_pose"]]
org_pose3d[seq] = batch["org_pose3d"][batch["valid_pose"]]
# break
_dgt = {}
seqs = sorted(np.unique(dataset.index.seq))
for seq in seqs:
inds = np.where(dataset.index.seq == seq)[0]
batch = dataset.get_samples(inds, False)
mgt = dataset.poses3d[inds][batch["valid_pose"]]
_dgt[seq] = mgt
_dpred = {}
raw_preds = {}
losses = {}
with torch.no_grad():
for i, (pose2d, valid) in enumerate(generator):
seq = seqs[i]
pred3d = (
model(torch.from_numpy(pose2d).cuda()).detach().cpu().numpy()
)
raw_preds[seq] = pred3d.copy() # .cpu().numpy()
valid = valid[0]
# pred_bo_np = pred3d[0][valid].reshape([-1, 2, 16])
# if orient_norm is None:
# pass
# elif orient_norm == "_1_1":
# pred_bo_np *= np.pi
# elif orient_norm == "0_1":
# pred_bo_np = (pred_bo_np * 2 * np.pi) - np.pi
# else:
# raise Exception(
# f"Not supported oreitation norm: {self.orient_norm}"
# )
# pred_bo = torch.from_numpy(pred_bo_np).to("cuda")
# orient_pred3d = (
# orient2pose(
# pred_bo,
# # torch.from_numpy(self.bo[seq]).to("cuda"),
# torch.from_numpy(bl[seq]).to("cuda"),
# torch.from_numpy(root[seq]).to("cuda"),
# )
# .cpu()
# .numpy()
# )
# preds[seq] = orient_pred3d
pred_real_pose = post_process_func(pred3d[0], seq)
if augment:
pred_real_pose_aug = post_process_func(pred3d[1], seq)
pred_real_pose_aug[:, :, 0] *= -1
pred_real_pose_aug = dataset.pose3d_jointset.flip(
pred_real_pose_aug
)
pred_real_pose = (pred_real_pose + pred_real_pose_aug) / 2
mpred = pred_real_pose[valid]
_dpred[seq] = mpred
_ = eval_results(_dgt, _dpred, MuPoTSJoints())
dgt = {}
dpred = {}
for seq in seqs:
mgt = _dgt[seq]
mpred = _dpred[seq]
gt_r = get_lengths(mgt)
r, t, p = get_rtp(mpred)
mpred = get_xyz(gt_r, t, p, mgt[:, 14, :])
dgt[seq] = mgt
dpred[seq] = mpred
_ = eval_results(dgt, dpred, MuPoTSJoints())
dgt = {}
dpred = {}
for seq in seqs:
mgt = _dgt[seq]
mpred = _dpred[seq]
gt_r = get_lengths(mgt)
diff = joint2bone(mpred)
dx = diff[:, :, 0]
dy = diff[:, :, 1]
dz = diff[:, :, 2]
adj_dz = np.sign(diff[:, :, 2]) * np.sqrt(np.maximum((gt_r**2) - (dx**2) - (dy**2), 0))
mpred = bone2joint(dx, dy, adj_dz, mgt[:, 14, :])
dgt[seq] = mgt
dpred[seq] = mpred
_ = eval_results(dgt, dpred, MuPoTSJoints())
for i in range(3):
i *= 10
show3Dpose(np.array([mgt[i, ], mpred[i, ]]), MuPoTSJoints(), invert_vertical=True)
plt.show()
print(eval_results({'0': mgt[i:i+1, ]}, {'0': mpred[i:i+1]}, MuPoTSJoints(), verbose=False)[0]['0'])
| 0.460289 | 0.537709 |
# Lesson 1 Exercise 2: Creating a Table with Apache Cassandra
<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/5/5e/Cassandra_logo.svg/1200px-Cassandra_logo.svg.png" width="250" height="250">
### Walk through the basics of Apache Cassandra. Complete the following tasks:<li> Create a table in Apache Cassandra, <li> Insert rows of data,<li> Run a simple SQL query to validate the information. <br>
`#####` denotes where the code needs to be completed.
Note: __Do not__ click the blue Preview button in the lower taskbar
#### Import Apache Cassandra python package
```
import cassandra
```
### Create a connection to the database
```
from cassandra.cluster import Cluster
try:
cluster = Cluster(['127.0.0.1']) #If you have a locally installed Apache Cassandra instance
session = cluster.connect()
except Exception as e:
print(e)
```
### TO-DO: Create a keyspace to do the work in
```
## TO-DO: Create the keyspace
try:
session.execute("""
CREATE KEYSPACE IF NOT EXISTS natan
WITH REPLICATION =
{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"""
)
except Exception as e:
print(e)
```
### TO-DO: Connect to the Keyspace
```
## To-Do: Add in the keyspace you created
try:
session.set_keyspace('natan')
except Exception as e:
print(e)
```
### Create a Song Library that contains a list of songs, including the song name, artist name, year, album it was from, and if it was a single.
`song_title
artist_name
year
album_name
single`
### TO-DO: You need to create a table to be able to run the following query:
`select * from songs WHERE year=1970 AND artist_name="The Beatles"`
```
## TO-DO: Complete the query below
query = "CREATE TABLE IF NOT EXISTS songs "
query = query + "(song_title text, artist_name text, year int, album_name text, single text, PRIMARY KEY (year, artist_name))"
try:
session.execute(query)
except Exception as e:
print(e)
```
### TO-DO: Insert the following two rows in your table
`First Row: "Across The Universe", "The Beatles", "1970", "False", "Let It Be"`
`Second Row: "The Beatles", "Think For Yourself", "False", "1965", "Rubber Soul"`
```
## Add in query and then run the insert statement
query = "INSERT INTO songs (song_title, artist_name, year, album_name, single)"
query = query + " VALUES (%s, %s, %s, %s, %s)"
try:
session.execute(query, ("Across The Universe", "The Beatles", 1970, "False", "Let It Be"))
except Exception as e:
print(e)
try:
session.execute(query, ("The Beatles", "Think For Yourself", 1965, "False", "Rubber Soul"))
except Exception as e:
print(e)
```
### TO-DO: Validate your data was inserted into the table.
```
## TO-DO: Complete and then run the select statement to validate the data was inserted into the table
query = 'SELECT * FROM songs'
try:
rows = session.execute(query)
except Exception as e:
print(e)
for row in rows:
print (row.year, row.album_name, row.artist_name)
```
### TO-DO: Validate the Data Model with the original query.
`select * from songs WHERE YEAR=1970 AND artist_name="The Beatles"`
```
##TO-DO: Complete the select statement to run the query
query = "select * from songs where year=1970 and artist_name='The Beatles'"
try:
rows = session.execute(query)
except Exception as e:
print(e)
for row in rows:
print (row.year, row.album_name, row.artist_name)
```
### And Finally close the session and cluster connection
```
session.shutdown()
cluster.shutdown()
```
|
github_jupyter
|
import cassandra
from cassandra.cluster import Cluster
try:
cluster = Cluster(['127.0.0.1']) #If you have a locally installed Apache Cassandra instance
session = cluster.connect()
except Exception as e:
print(e)
## TO-DO: Create the keyspace
try:
session.execute("""
CREATE KEYSPACE IF NOT EXISTS natan
WITH REPLICATION =
{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"""
)
except Exception as e:
print(e)
## To-Do: Add in the keyspace you created
try:
session.set_keyspace('natan')
except Exception as e:
print(e)
## TO-DO: Complete the query below
query = "CREATE TABLE IF NOT EXISTS songs "
query = query + "(song_title text, artist_name text, year int, album_name text, single text, PRIMARY KEY (year, artist_name))"
try:
session.execute(query)
except Exception as e:
print(e)
## Add in query and then run the insert statement
query = "INSERT INTO songs (song_title, artist_name, year, album_name, single)"
query = query + " VALUES (%s, %s, %s, %s, %s)"
try:
session.execute(query, ("Across The Universe", "The Beatles", 1970, "False", "Let It Be"))
except Exception as e:
print(e)
try:
session.execute(query, ("The Beatles", "Think For Yourself", 1965, "False", "Rubber Soul"))
except Exception as e:
print(e)
## TO-DO: Complete and then run the select statement to validate the data was inserted into the table
query = 'SELECT * FROM songs'
try:
rows = session.execute(query)
except Exception as e:
print(e)
for row in rows:
print (row.year, row.album_name, row.artist_name)
##TO-DO: Complete the select statement to run the query
query = "select * from songs where year=1970 and artist_name='The Beatles'"
try:
rows = session.execute(query)
except Exception as e:
print(e)
for row in rows:
print (row.year, row.album_name, row.artist_name)
session.shutdown()
cluster.shutdown()
| 0.259263 | 0.942929 |
<a href="https://colab.research.google.com/github/Benjamindavid03/MachineLearningLab/blob/main/Bagging_in_Classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Bagging Classifier
A Bagging classifier is an ensemble meta-estimator that fits base classifiers, each on a random subset of the original dataset, and then aggregates their individual predictions (either by voting or by averaging) to form a final prediction. Such a meta-estimator can typically be used as a way to reduce the variance of a black-box estimator (e.g., a decision tree), by introducing randomization into its construction procedure and then making an ensemble out of it.
Each base classifier is trained in parallel on a training set generated by randomly drawing, with replacement, N examples (or data points) from the original training dataset, where N is the size of the original training set. The training set for each base classifier is independent of the others. Many of the original data points may be repeated in a resulting training set, while others may be left out.
Bagging reduces overfitting (variance) by averaging or voting; however, this can lead to an increase in bias, which is compensated for by the reduction in variance.
## How Bagging works on a training dataset
How bagging works on an imaginary training dataset is shown below. Since Bagging resamples the original training dataset with replacement, some instances (or data points) may be present multiple times while others are left out.
The algorithm for classifier generation and classification is given below:
<img src= "https://media.geeksforgeeks.org/wp-content/uploads/20190515171714/cb7feb7c-d065-4da7-bea6-5f6017038059.png"/>
<code>
<b>Classifier generation:</b><br/>
Let N be the size of the training set.<br/>
for each of t iterations:<br/>
sample N instances with replacement from the original training set.<br>
apply the learning algorithm to the sample.<br/>
store the resulting classifier.<br/>
<b>Classification:</b></br>
for each of the t classifiers: </br>
predict class of instance using classifier.</br>
return class that was predicted most often.</br>
</code>
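As a rough, hand-rolled sketch of the pseudocode above (for illustration only; it is not part of the original example and simply reuses the Iris data and `DecisionTreeClassifier` from the cell that follows), bootstrap sampling and majority voting can be written out explicitly:
```
import numpy as np
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
rng = np.random.default_rng(2)
n_samples = X.shape[0]

# classifier generation: train each tree on a bootstrap sample (N draws with replacement)
classifiers = []
for _ in range(25):
    idx = rng.integers(0, n_samples, size=n_samples)
    classifiers.append(DecisionTreeClassifier().fit(X[idx], y[idx]))

# classification: every tree votes, and the most frequent class wins
votes = np.stack([clf.predict(X) for clf in classifiers])  # shape: (n_trees, n_samples)
majority = np.apply_along_axis(lambda v: np.bincount(v).argmax(), 0, votes)
print("hand-rolled bagging training accuracy:", (majority == y).mean())
```
`BaggingClassifier` in the cell below performs these same steps internally (and adds options such as feature subsampling).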
```
from sklearn import model_selection
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
# load the data
iris = load_iris()
X = iris.data[:, :4]
Y = iris.target
seed = 2
kfold = model_selection.KFold(n_splits = 3,random_state = seed, shuffle= True)
# initialize the base classifier
base_cls = DecisionTreeClassifier()
# no. of base classifier
num_trees = 500
# bagging classifier
model = BaggingClassifier(base_estimator = base_cls,
n_estimators = num_trees,
random_state = seed)
results = model_selection.cross_val_score(model, X, Y, cv = kfold)
print("accuracy :")
print(results.mean())
```
|
github_jupyter
|
from sklearn import model_selection
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
# load the data
iris = load_iris()
X = iris.data[:, :4]
Y = iris.target
seed = 2
kfold = model_selection.KFold(n_splits = 3,random_state = seed, shuffle= True)
# initialize the base classifier
base_cls = DecisionTreeClassifier()
# no. of base classifier
num_trees = 500
# bagging classifier
model = BaggingClassifier(base_estimator = base_cls,
n_estimators = num_trees,
random_state = seed)
results = model_selection.cross_val_score(model, X, Y, cv = kfold)
print("accuracy :")
print(results.mean())
| 0.677154 | 0.983327 |
```
import pandas as pd
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
```
## Data Processing
```
df = pd.read_csv('./trips_BGMM.csv')
df.info()
# get df per cluster
res = pd.DataFrame(columns=['Cluster'])
dfs = {}
for c in range(df['Cluster'].max()+1):
# cluster c
res.loc[c, 'Cluster'] = c
df_cur = df[df['Cluster'] == c].drop(columns=['Cluster', 'Year', 'Month', 'Day'])
# get dummies for time
dummies = pd.get_dummies(df_cur['Time'], prefix='Time')
df_cur.drop(columns=['Time'], inplace=True)
df_cur = pd.concat([df_cur, dummies], axis=1)
dfs[c] = df_cur
```
## Ridge Regression
```
from sklearn.linear_model import Ridge
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('ridge', Ridge())])
# set grid search
params = {'ridge__alpha': [0, 1, 5, 10, 50, 100, 500, 1000]}
ridge_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['Ridge Regression'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
ridge_search.fit(X_train, Y_train)
print('Best params: {}'.format(ridge_search.best_params_))
# predict
Y_pred = ridge_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = ridge_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'Ridge Regression'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
```
## LASSO Regression
```
from sklearn.linear_model import Lasso
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('lasso', Lasso())])
# set grid search
params = {'lasso__alpha': [1, 5, 10, 50, 100, 500, 1000]}
lasso_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['LASSO Regression'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
lasso_search.fit(X_train, Y_train)
print('Best params: {}'.format(lasso_search.best_params_))
# predict
Y_pred = lasso_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = lasso_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'LASSO Regression'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
```
## AdaBoost Regression
```
from sklearn.ensemble import AdaBoostRegressor
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('ada', AdaBoostRegressor())])
# set grid search
params = {'ada__n_estimators': [10, 50, 100],
'ada__learning_rate' : [0.01, 0.05, 0.1, 0.5],
'ada__loss' : ['linear', 'square', 'exponential']}
ada_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['AdaBoost Regression'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
ada_search.fit(X_train, Y_train)
print('Best params: {}'.format(ada_search.best_params_))
# predict
Y_pred = ada_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = ada_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'AdaBoost Regression'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
```
## KNN
```
from sklearn.neighbors import KNeighborsRegressor
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('knn', KNeighborsRegressor())])
# set grid search
params = {'knn__n_neighbors': [3, 5, 11, 19, 23, 29],
'knn__weights': ['uniform', 'distance'],
'knn__metric': ['euclidean', 'manhattan']}
knn_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['KNN'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
knn_search.fit(X_train, Y_train)
print('Best params: {}'.format(knn_search.best_params_))
# predict
Y_pred = knn_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = knn_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'KNN'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
```
## Decision Tree
```
from sklearn.tree import DecisionTreeRegressor
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('tree', DecisionTreeRegressor())])
# set grid search
params = {'tree__max_depth': [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]}
tree_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['Decision Tree'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
tree_search.fit(X_train, Y_train)
print('Best params: {}'.format(tree_search.best_params_))
# predict
Y_pred = tree_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = tree_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'Decision Tree'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
```
## Random Forest
```
from sklearn.ensemble import RandomForestRegressor
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('rf', RandomForestRegressor())])
# set grid search
params = {'rf__n_estimators': [10, 50, 100, 500],
'rf__max_features': ['auto', 'log2', 'sqrt']}
rf_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['Random Forest'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
rf_search.fit(X_train, Y_train)
print('Best params: {}'.format(rf_search.best_params_))
# predict
Y_pred = rf_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = rf_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'Random Forest'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
```
## Xgboost
```
import xgboost as xgb
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('xgb', xgb.XGBRegressor())])
# set grid search
params = {'xgb__min_child_weight': [1, 5, 10],
'xgb__gamma': [0.5, 1, 1.5],
'xgb__subsample': [0.6, 0.8, 1.0],
'xgb__colsample_bytree': [0.6, 0.8, 1.0],
'xgb__max_depth': [3, 4, 5]}
xgb_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['Xgboost'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
xgb_search.fit(X_train, Y_train)
print('Best params: {}'.format(xgb_search.best_params_))
# predict
Y_pred = xgb_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = xgb_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'Xgboost'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
```
|
github_jupyter
|
import pandas as pd
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
df = pd.read_csv('./trips_BGMM.csv')
df.info()
# get df per cluster
res = pd.DataFrame(columns=['Cluster'])
dfs = {}
for c in range(df['Cluster'].max()+1):
# cluster c
res.loc[c, 'Cluster'] = c
df_cur = df[df['Cluster'] == c].drop(columns=['Cluster', 'Year', 'Month', 'Day'])
# get dummies for time
dummies = pd.get_dummies(df_cur['Time'], prefix='Time')
df_cur.drop(columns=['Time'], inplace=True)
df_cur = pd.concat([df_cur, dummies], axis=1)
dfs[c] = df_cur
from sklearn.linear_model import Ridge
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('ridge', Ridge())])
# set grid search
params = {'ridge__alpha': [0, 1, 5, 10, 50, 100, 500, 1000]}
ridge_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['Ridge Regression'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
ridge_search.fit(X_train, Y_train)
print('Best params: {}'.format(ridge_search.best_params_))
# predict
Y_pred = ridge_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = ridge_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'Ridge Regression'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
from sklearn.linear_model import Lasso
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('lasso', Lasso())])
# set grid search
params = {'lasso__alpha': [1, 5, 10, 50, 100, 500, 1000]}
lasso_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['LASSO Regression'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
lasso_search.fit(X_train, Y_train)
print('Best params: {}'.format(lasso_search.best_params_))
# predict
Y_pred = lasso_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = lasso_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'LASSO Regression'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
from sklearn.ensemble import AdaBoostRegressor
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('ada', AdaBoostRegressor())])
# set grid search
params = {'ada__n_estimators': [10, 50, 100],
'ada__learning_rate' : [0.01, 0.05, 0.1, 0.5],
'ada__loss' : ['linear', 'square', 'exponential']}
ada_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['AdaBoost Regression'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
ada_search.fit(X_train, Y_train)
print('Best params: {}'.format(ada_search.best_params_))
# predict
Y_pred = ada_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = ada_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'AdaBoost Regression'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
from sklearn.neighbors import KNeighborsRegressor
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('knn', KNeighborsRegressor())])
# set grid search
params = {'knn__n_neighbors': [3, 5, 11, 19, 23, 29],
'knn__weights': ['uniform', 'distance'],
'knn__metric': ['euclidean', 'manhattan']}
knn_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['KNN'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
knn_search.fit(X_train, Y_train)
print('Best params: {}'.format(knn_search.best_params_))
# predict
Y_pred = knn_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = knn_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'KNN'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
from sklearn.tree import DecisionTreeRegressor
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('tree', DecisionTreeRegressor())])
# set grid search
params = {'tree__max_depth': [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]}
tree_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['Decision Tree'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
tree_search.fit(X_train, Y_train)
print('Best params: {}'.format(tree_search.best_params_))
# predict
Y_pred = tree_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = tree_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'Decision Tree'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
from sklearn.ensemble import RandomForestRegressor
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('rf', RandomForestRegressor())])
# set grid search
params = {'rf__n_estimators': [10, 50, 100, 500],
'rf__max_features': ['auto', 'log2', 'sqrt']}
rf_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['Random Forest'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
rf_search.fit(X_train, Y_train)
print('Best params: {}'.format(rf_search.best_params_))
# predict
Y_pred = rf_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = rf_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'Random Forest'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
import xgboost as xgb
# set pipeline
pipeline = Pipeline([('scaler', StandardScaler()),
('xgb', xgb.XGBRegressor())])
# set grid search
params = {'xgb__min_child_weight': [1, 5, 10],
'xgb__gamma': [0.5, 1, 1.5],
'xgb__subsample': [0.6, 0.8, 1.0],
'xgb__colsample_bytree': [0.6, 0.8, 1.0],
'xgb__max_depth': [3, 4, 5]}
xgb_search = GridSearchCV(pipeline, params, cv=3, verbose=2, n_jobs=-1)
res['Xgboost'] = 0
for c in dfs:
print('For cluster {}:'.format(c))
# train test split
df_train, df_val = train_test_split(dfs[c], test_size=0.2, random_state=1207)
# get X Y
X_train = df_train.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_train = df_train['Checkin per Hour'] - df_train['Checkout per Hour']
X_val = df_val.drop(columns=['Checkin per Hour', 'Checkout per Hour'])
Y_val = df_val['Checkin per Hour'] - df_val['Checkout per Hour']
# shuffle
X_train, Y_train = shuffle(X_train, Y_train, random_state=1207)
# train model
xgb_search.fit(X_train, Y_train)
print('Best params: {}'.format(xgb_search.best_params_))
# predict
Y_pred = xgb_search.predict(X_val)
mse = mean_squared_error(Y_val, Y_pred)
print('MSE: {}'.format(mse))
r2 = xgb_search.score(X_val, Y_val)
print('R^2: {}'.format(r2))
res.loc[c, 'Xgboost'] = r2
print()
# save csv
res.to_csv('./pred_res.csv', index=False)
| 0.500244 | 0.72812 |
# Logistic Regression Customer Churn
A marketing agency has many customers that use their service to produce ads for the client/customer websites. They've noticed that they have quite a bit of churn in clients. They currently assign account managers more or less at random, but they want a machine learning model that predicts which customers will churn (stop buying their service), so that the customers most at risk of churning can be assigned an account manager. Luckily they have some historical data; can you help them out? Create a classification algorithm that classifies whether or not a customer churned. The company can then run it against incoming data for future customers to predict which are likely to churn and assign them an account manager.
The data is saved as customer_churn.csv. Here are the fields and their definitions:
Name : Name of the latest contact at Company
Age: Customer Age
Total_Purchase: Total Ads Purchased
Account_Manager: Binary 0=No manager, 1= Account manager assigned
Years: Total years as a customer
Num_sites: Number of websites that use the service.
Onboard_date: Date that the name of the latest contact was onboarded
Location: Client HQ Address
Company: Name of Client Company
Once you've created the model and evaluated it, test out the model on some new data (you can think of this almost like a hold-out set) that your client has provided, saved under new_customers.csv. The client wants to know which customers are most likely to churn given this data (they don't have the label yet).
```
import findspark
findspark.init('/home/eissa/spark-2.3.1-bin-hadoop2.7')
from pyspark.sql import SparkSession
# Create Session
spark = SparkSession.builder.appName('logregconsult').getOrCreate()
# Load Data
data = spark.read.csv('customer_churn.csv',inferSchema=True,
header=True)
data.printSchema()
data.describe().show()
data.columns
!pip install numpy
from pyspark.ml.feature import VectorAssembler
# Create Feature Vector
assembler = VectorAssembler(inputCols=['Age',
'Total_Purchase',
'Account_Manager',
'Years',
'Num_Sites'],outputCol='features')
output = assembler.transform(data)
output.select('Features').show()
final_data = output.select('features','churn')
```
### Test Train Split
```
train_churn,test_churn = final_data.randomSplit([0.7,0.3])
```
### Fit the model
```
from pyspark.ml.classification import LogisticRegression
lr_churn = LogisticRegression(labelCol='churn')
fitted_churn_model = lr_churn.fit(train_churn)
training_sum = fitted_churn_model.summary
training_sum.predictions.show()
training_sum.predictions.describe().show()
```
### Evaluate results
```
from pyspark.ml.evaluation import BinaryClassificationEvaluator
pred_and_labels = fitted_churn_model.evaluate(test_churn)
pred_and_labels.predictions.show()
```
### Using AUC
```
churn_eval = BinaryClassificationEvaluator(rawPredictionCol='prediction',
labelCol='churn')
auc = churn_eval.evaluate(pred_and_labels.predictions)
auc
```
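The evaluator above is pointed at the hard 0/1 `prediction` column. As an optional variant (a sketch only, not part of the original exercise), the AUC can also be computed from the classifier's raw scores by using the evaluator's default `rawPrediction` column, which is already present in the summary's predictions DataFrame:
```
# Sketch: AUC from the raw classifier scores instead of the thresholded predictions
raw_churn_eval = BinaryClassificationEvaluator(rawPredictionCol='rawPrediction',
                                               labelCol='churn')
raw_churn_eval.evaluate(pred_and_labels.predictions)
```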
### Predict on brand new unlabeled data
```
final_lr_model = lr_churn.fit(final_data)
new_customers = spark.read.csv('new_customers.csv',inferSchema=True,
header=True)
new_customers.printSchema()
test_new_customers = assembler.transform(new_customers)
test_new_customers.printSchema()
final_results = final_lr_model.transform(test_new_customers)
final_results.select('Company','prediction').show()
```
|
github_jupyter
|
import findspark
findspark.init('/home/eissa/spark-2.3.1-bin-hadoop2.7')
from pyspark.sql import SparkSession
# Create Session
spark = SparkSession.builder.appName('logregconsult').getOrCreate()
# Load Data
data = spark.read.csv('customer_churn.csv',inferSchema=True,
header=True)
data.printSchema()
data.describe().show()
data.columns
!pip install numpy
from pyspark.ml.feature import VectorAssembler
# Create Feature Vector
assembler = VectorAssembler(inputCols=['Age',
'Total_Purchase',
'Account_Manager',
'Years',
'Num_Sites'],outputCol='features')
output = assembler.transform(data)
output.select('Features').show()
final_data = output.select('features','churn')
train_churn,test_churn = final_data.randomSplit([0.7,0.3])
from pyspark.ml.classification import LogisticRegression
lr_churn = LogisticRegression(labelCol='churn')
fitted_churn_model = lr_churn.fit(train_churn)
training_sum = fitted_churn_model.summary
training_sum.predictions.show()
training_sum.predictions.describe().show()
from pyspark.ml.evaluation import BinaryClassificationEvaluator
pred_and_labels = fitted_churn_model.evaluate(test_churn)
pred_and_labels.predictions.show()
churn_eval = BinaryClassificationEvaluator(rawPredictionCol='prediction',
labelCol='churn')
auc = churn_eval.evaluate(pred_and_labels.predictions)
auc
final_lr_model = lr_churn.fit(final_data)
new_customers = spark.read.csv('new_customers.csv',inferSchema=True,
header=True)
new_customers.printSchema()
test_new_customers = assembler.transform(new_customers)
test_new_customers.printSchema()
final_results = final_lr_model.transform(test_new_customers)
final_results.select('Company','prediction').show()
| 0.723407 | 0.938969 |
```
#!pip install --user Fiona-1.8.6-cp37-cp37m-win_amd64.whl
#!pip install --user Shapely-1.6.4.post2-cp37-cp37m-win_amd64.whl
#!pip install --user GDAL-2.4.1-cp37-cp37m-win_amd64.whl
from libfunc import *
# xosrm.RequestConfig.host = "127.0.0.1:5000"
xosrm.RequestConfig.host = "52.236.141.167:8080"
'''
Calculate the mileage for each day of the week, visiting the points in the given order.
You must specify the path to the schedule file and the coordinates of the departure and arrival points.
The schedule MUST NOT contain the 'Новый для КПК' (TT) point.
Output: a table of distances
'''
schedule_data_route = open_excel_file(
file_path='C:\\Users\\andre\\OneDrive\\Рабочий стол\\sav\\Odessa\\Расписания\\Бреславська Валерія Володимирівна.xls')
# Dictionary of points, split by time interval and day of the week
full_dict_route = schedule(schedule_data_route)
data = calc_dist(full_dict_route)
# Display the distance table
display(df_first(data))
display(df_second(data))
'''
TRIP_ROUTE calculation (greedy algorithm)
Calculate the mileage for each day of the week with an optimised ordering of the points.
You must specify the path to the schedule file.
The schedule MUST contain 'Новый для КПК' as the first point of the day, with the minimum index (0 or 1).
Output: a table of distances
'''
file_path='C:\\Users\\andre\\OneDrive\\Рабочий стол\\sav\\Тоба пробег.xls'
schedule_data_trip = open_excel_file(file_path)
short_file_name = file_path.split('\\')[-1].split('.')[0]
# Dictionary of points, split by time interval and day of the week
full_dict_trip = schedule(schedule_data_trip)
# Call the function
data = calc_trips(full_dict_trip)
# Write the result to an Excel file
write_to_excel(data, short_file_name)
# Display the tables
display(df_first(data[0]))
display(df_second(data[0]))
'''
Calculate optimal mileages for all schedules in a folder.
Input: path to the folder with the schedule files
Output: tables of mileage for each day
'''
# Path to the folder with the schedules
path = 'C:\\Users\\andre\\OneDrive\\Рабочий стол\\sav\\Vinnitsa2\\'
# Call the function
calc_folder(path)
from libfunc import *
# xosrm.RequestConfig.host = "127.0.0.1:5000"
xosrm.RequestConfig.host = "52.236.141.167:8080"
path = 'C:\\Users\\andre\\OneDrive\\Рабочий стол\\sav\\Просчет пробегов по ПГ\\Исходники\\Одесса.xlsx'
data = pd.read_excel(path)
unique_val = data['ФИО ТП'].unique()
writer = pd.ExcelWriter('Одесса - расчетные маршруты.xlsx', engine = 'xlsxwriter')
empty_table = pd.DataFrame()
empty_table.to_excel(writer, sheet_name = 'Пробег', index=False)
for index, psr in enumerate(unique_val):
print(psr + ' START')
psr_schedule = data[data['ФИО ТП']==psr]
full_dict_route = schedule(psr_schedule)
result = calc_trips(full_dict_route)
write_to_excel2(writer, result, psr, index)
print(psr + ' FINISHED')
worksheet = writer.sheets['Пробег']
for col in range(0, 17, 5):
worksheet.write(2, 1 + col, 'ПН')
worksheet.write(2, 2 + col, 'ВТ')
worksheet.write(2, 3 + col, 'СР')
worksheet.write(2, 4 + col, 'ЧТ')
worksheet.write(2, 5 + col, 'ПТ')
# Merge the header cells.
worksheet.merge_range('B1:K1', 'Первая половина месяца')
worksheet.merge_range('L1:U1', 'Вторая половина месяца')
worksheet.merge_range('B2:F2', 'Не четная неделя')
worksheet.merge_range('G2:K2', 'Четная неделя')
worksheet.merge_range('L2:P2', 'Не четная неделя')
worksheet.merge_range('Q2:U2', 'Четная неделя')
writer.save()
writer.close()
print('Файл создан')
```
|
github_jupyter
|
#!pip install --user Fiona-1.8.6-cp37-cp37m-win_amd64.whl
#!pip install --user Shapely-1.6.4.post2-cp37-cp37m-win_amd64.whl
#!pip install --user GDAL-2.4.1-cp37-cp37m-win_amd64.whl
from libfunc import *
# xosrm.RequestConfig.host = "127.0.0.1:5000"
xosrm.RequestConfig.host = "52.236.141.167:8080"
'''
Calculate the mileage for each day of the week, visiting the points in the given order.
You must specify the path to the schedule file and the coordinates of the departure and arrival points.
The schedule MUST NOT contain the 'Новый для КПК' (TT) point.
Output: a table of distances
'''
schedule_data_route = open_excel_file(
file_path='C:\\Users\\andre\\OneDrive\\Рабочий стол\\sav\\Odessa\\Расписания\\Бреславська Валерія Володимирівна.xls')
# Dictionary of points, split by time interval and day of the week
full_dict_route = schedule(schedule_data_route)
data = calc_dist(full_dict_route)
# Display the distance table
display(df_first(data))
display(df_second(data))
'''
TRIP_ROUTE calculation (greedy algorithm)
Calculate the mileage for each day of the week with an optimised ordering of the points.
You must specify the path to the schedule file.
The schedule MUST contain 'Новый для КПК' as the first point of the day, with the minimum index (0 or 1).
Output: a table of distances
'''
file_path='C:\\Users\\andre\\OneDrive\\Рабочий стол\\sav\\Тоба пробег.xls'
schedule_data_trip = open_excel_file(file_path)
short_file_name = file_path.split('\\')[-1].split('.')[0]
# Dictionary of points, split by time interval and day of the week
full_dict_trip = schedule(schedule_data_trip)
# Call the function
data = calc_trips(full_dict_trip)
# Write the result to an Excel file
write_to_excel(data, short_file_name)
# Display the tables
display(df_first(data[0]))
display(df_second(data[0]))
'''
Calculate optimal mileages for all schedules in a folder.
Input: path to the folder with the schedule files
Output: tables of mileage for each day
'''
# Path to the folder with the schedules
path = 'C:\\Users\\andre\\OneDrive\\Рабочий стол\\sav\\Vinnitsa2\\'
# Call the function
calc_folder(path)
from libfunc import *
# xosrm.RequestConfig.host = "127.0.0.1:5000"
xosrm.RequestConfig.host = "52.236.141.167:8080"
path = 'C:\\Users\\andre\\OneDrive\\Рабочий стол\\sav\\Просчет пробегов по ПГ\\Исходники\\Одесса.xlsx'
data = pd.read_excel(path)
unique_val = data['ФИО ТП'].unique()
writer = pd.ExcelWriter('Одесса - расчетные маршруты.xlsx', engine = 'xlsxwriter')
empty_table = pd.DataFrame()
empty_table.to_excel(writer, sheet_name = 'Пробег', index=False)
for index, psr in enumerate(unique_val):
print(psr + ' START')
psr_schedule = data[data['ФИО ТП']==psr]
full_dict_route = schedule(psr_schedule)
result = calc_trips(full_dict_route)
write_to_excel2(writer, result, psr, index)
print(psr + ' FINISHED')
worksheet = writer.sheets['Пробег']
for col in range(0, 17, 5):
worksheet.write(2, 1 + col, 'ПН')
worksheet.write(2, 2 + col, 'ВТ')
worksheet.write(2, 3 + col, 'СР')
worksheet.write(2, 4 + col, 'ЧТ')
worksheet.write(2, 5 + col, 'ПТ')
# Merge the header cells.
worksheet.merge_range('B1:K1', 'Первая половина месяца')
worksheet.merge_range('L1:U1', 'Вторая половина месяца')
worksheet.merge_range('B2:F2', 'Не четная неделя')
worksheet.merge_range('G2:K2', 'Четная неделя')
worksheet.merge_range('L2:P2', 'Не четная неделя')
worksheet.merge_range('Q2:U2', 'Четная неделя')
writer.save()
writer.close()
print('Файл создан')
| 0.107754 | 0.252591 |
# Example 9.3: Dual Cycle (Cold-Air-Standard)
*John F. Maddox, Ph.D., P.E.<br>
University of Kentucky - Paducah Campus<br>
ME 321: Engineering Thermodynamics II<br>*
## Problem Statement
A Dual cycle with intake conditions of $300\ \text{K}$ and $1\ \text{bar}$ has a compression ratio of $10$, a maximum pressure of $50\ \text{bar}$ and a maximum temperature of $2000\ \text{K}$. Using a **cold-air-standard analysis**, find the cutoff ratio, net work output (kJ/kg), heat input, and cycle efficiency
* (a) $p$-$v$ diagram
* (b) $T$-$s$ diagram
* (c) $T$,$p$ at each state
* (d) $r_c$
* (e) $q_{in}$
* (f) $w_{net}$
* (g) $\eta_{th}$
* (h) $\text{MEP}$
## Solution
__[Video Explanation](https://uky.yuja.com/V/Video?v=3074248&node=10465176&a=1136783553&autoplay=1)__
### Python Initialization
We'll start by importing the libraries we will use for our analysis and initializing dictionaries to hold the properties we will be using.
```
from kilojoule.templates.kSI_K import *
air = idealgas.Properties('Air',unit_system='kSI_K')
```
### Given Parameters
We now define variables to hold our known values.
```
T[1] = Quantity(300,'K') # Inlet Temperature
p[1] = Quantity(1,'bar') # Inlet pressure
r = Quantity(10,'') # Compression ratio
p_max = Quantity(50,'bar')# Max pressure
T_max = Quantity(2000,'K')# Max Temperature
Summary();
```
### Assumptions
- Cold-air-standard Analysis
- Ideal gas
- Constant specific heat (evaluated at $25^\circ\text{C}$)
- Maximum pressure occurs at states 3 and 4
- Maximum temperature occurs at state 4
- Negligible changes in kinetic energy
- Negligible changes in potential energy
```
%%showcalc
# Ideal Gas
R = air.R
# Constant thermal properties evaluated at room temperature
T_room = Quantity(25,'degC')
c_v = air.Cv(T=T_room)
c_p = air.Cp(T=T_room)
k = air.k(T=T_room)
# Maximum temperature and pressure
p[3] = p_max
p[4] = p_max
T[4] = T_max
```
#### (c) $T$ and $p$
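The state numbering follows the standard Dual cycle: 1$\to$2 isentropic compression, 2$\to$3 constant-volume heat addition, 3$\to$4 constant-pressure heat addition, 4$\to$5 isentropic expansion, and 5$\to$1 constant-volume heat rejection. For reference, the constant-specific-heat relations applied in the next cell are

$$\frac{T_2}{T_1}=r^{\,k-1},\qquad \frac{p_2}{p_1}=r^{\,k},\qquad \frac{T_5}{T_4}=\left(\frac{v_4}{v_5}\right)^{k-1},\qquad pv=RT$$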
```
#%%showcalc
# 1-2) Isentropic compression: Isentropic Ideal Gas Relations
T[2] = T[1]*r**(k-1)
p[2] = p[1]*r**k
# 2-3) Constant volume heat addition: Ideal Gas law at both states
T[3] = T[2]*p[3]/p[2]
# 3-4) Constant pressure heat addition: We already know $T_4$ and $p_4$
# 4-5) Isentropic Expansion: Isentropic Ideal Gas Relations
v[1] = R*T[1]/p[1]
v[5] = v[1]
v[4] = R*T[4]/p[4]
T[5] = T[4]*(v[4]/v[5])**(k-1)
p[5] = R*T[5]/v[5]
Calculations()
states.display();
```
#### (d) Cut-off ratio
```
%%showcalc
v[3] = R*T[3]/p[3]
r_c = v[4]/v[3]
```
#### (e) Heat input
```
%%showcalc
# 1st Law 2$\to$3
q_2_to_3 = c_v*(T[3]-T[2])
# 1st Law 3$\to$4
q_3_to_4 = c_p*(T[4]-T[3])
# Total heat input
q_in = q_2_to_3 + q_3_to_4
```
#### (f) $w_{net}$
```
%%showcalc
# 1st Law 5$\to$1
q_5_to_1 = c_v*(T[1]-T[5])
q_out = -q_5_to_1
# 1st Law Full Cycle
w_net = q_in-q_out
```
#### (g) $\eta_{th}$
```
%%showcalc
# Thermal efficiency
eta_th = w_net/q_in
```
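As a rough consistency check (not computed in the notebook), the closed-form cold-air-standard Dual-cycle efficiency, written in terms of the compression ratio $r$, the cutoff ratio $r_c$, and the pressure ratio $r_p = p_3/p_2$, is

$$\eta_{th}=1-\frac{1}{r^{\,k-1}}\left[\frac{r_p\,r_c^{\,k}-1}{(r_p-1)+k\,r_p\,(r_c-1)}\right]$$

which should reproduce the value obtained from the energy balances above.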
#### (h) MEP
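The mean effective pressure evaluated below is the net work per unit of displacement volume,

$$\text{MEP}=\frac{w_{net}}{v_{max}-v_{min}}=\frac{w_{net}}{v_1-v_2}$$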
```
%%showcalc
# Mean Effective Pressure
v_max = v[1]
v[2] = v[3]
v_min = v[2]
MEP = w_net/(v_max-v_min)
```
### Plotting
*Note: The plotting library uses the property tables to draw the process paths, which inherently assumes variable specific heat (i.e. real-fluid or air-standard assumptions). If the library is used to draw process paths between states that were obtained using constant specific heat (cold-air-standard assumptions) there will be inconsistencies between the state points and the process paths.*
In order to plot the paths of the cycle on the $p$-$v$ diagram and states on the $T$-$s$ diagram, we need discrete values for the specific enthalpy and entropy at each state rather than just the changes in properties we calculated above. To do this we can pick an arbitrary value for the enthalpy and entropy at any state, then calculate the enthalpy and entropy at the rest of the states relative to the reference point. For this case, we will look up the properties from the tables for state 1 and use that as our starting point.
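For constant specific heats, the entropy at each state relative to state 1 would follow from

$$s_i-s_1=c_p\ln\frac{T_i}{T_1}-R\ln\frac{p_i}{p_1}$$

which is the relation left commented out in the next cell; the cell instead uses the table-based `air.s(T, p)` values, which is what produces the small mismatches noted in these plots.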
#### (a) $p$-$v$
Note: the isentropic lines do not line up exactly with the states in this diagram because the entropies were calculated for variable specific heat, but all other properties were calculated using the
cold-air-standard assumptions (constant specific heat).
```
from math import log
# Add entropy to the property table
states.add_property('s',units='kJ/kg/K')
s = states.dict['s']
pv = air.pv_diagram()
s[1] = air.s(T=T[1],p=p[1])
for i in [2,3,4,5]:
#s[i] = s[1] + c_p*log(T[i]/T[1]) - R*log(p[i]/p[1])
s[i] = air.s(T=T[i],p=p[i])
# plot each state on the p,v diagram and calculate the entropy at each state
for i in range(1,6):
pv.plot_state(states[i],label_loc='north east')
# plot the process paths
pv.plot_process(states[1],states[2],path='isentropic')
pv.plot_process(states[2],states[3],path='isochoric')
pv.plot_process(states[3],states[4],path='isobaric')
pv.plot_process(states[4],states[5],path='isentropic')
pv.plot_process(states[5],states[1],path='isochoric');
```
#### (b) $T$-$s$ diagram
**Note: the isentropic lines are not vertical in this diagram because the entropies were
calculated for variable specific heat**, but all other properties were calculated using the
cold-air-standard assumptions (constant specific heat). Therefore the errors resulting from the constant specific heat assumption are evident in the skewed shape of the cycle on the $T$-$s$ diagram.
```
Ts = air.Ts_diagram()
for i in range(1,6):
Ts.plot_state(states[i],label_loc='north east')
Ts.plot_process(states[1],states[2],label='incorrect trend',path='isentropic')
Ts.plot_process(states[2],states[3],path='isochoric')
Ts.plot_process(states[3],states[4],path='isobaric')
Ts.plot_process(states[4],states[5],label='incorrect trend',path='isentropic')
Ts.plot_process(states[5],states[1],path='isochoric');
Summary();
Summary(['r_c','q_in','w_net','eta_th','MEP']);
```
|
github_jupyter
|
from kilojoule.templates.kSI_K import *
air = idealgas.Properties('Air',unit_system='kSI_K')
T[1] = Quantity(300,'K') # Inlet Temperature
p[1] = Quantity(1,'bar') # Inlet pressure
r = Quantity(10,'') # Compression ratio
p_max = Quantity(50,'bar')# Max pressure
T_max = Quantity(2000,'K')# Max Temperature
Summary();
%%showcalc
# Ideal Gas
R = air.R
# Constant thermal properties evaluated at room temperature
T_room = Quantity(25,'degC')
c_v = air.Cv(T=T_room)
c_p = air.Cp(T=T_room)
k = air.k(T=T_room)
# Maximum temperature and pressure
p[3] = p_max
p[4] = p_max
T[4] = T_max
#%%showcalc
# 1-2) Isentropic compression: Isentropic Ideal Gas Relations
T[2] = T[1]*r**(k-1)
p[2] = p[1]*r**k
# 2-3) Constant volume heat addition: Ideal Gas law at both states
T[3] = T[2]*p[3]/p[2]
# 3-4) Constant pressure heat addition: We already know $T_4$ and $p_4$
# 4-5) Isentropic Expansion: Isentropic Ideal Gas Relations
v[1] = R*T[1]/p[1]
v[5] = v[1]
v[4] = R*T[4]/p[4]
T[5] = T[4]*(v[4]/v[5])**(k-1)
p[5] = R*T[5]/v[5]
Calculations()
states.display();
%%showcalc
v[3] = R*T[3]/p[3]
r_c = v[4]/v[3]
%%showcalc
# 1st Law 2$\to$3
q_2_to_3 = c_v*(T[3]-T[2])
# 1st Law 3$\to$4
q_3_to_4 = c_p*(T[4]-T[3])
# Total heat input
q_in = q_2_to_3 + q_3_to_4
%%showcalc
# 1st Law 5$\to$1
q_5_to_1 = c_v*(T[1]-T[5])
q_out = -q_5_to_1
# 1st Law Full Cycle
w_net = q_in-q_out
%%showcalc
# Thermal efficiency
eta_th = w_net/q_in
%%showcalc
# Mean Effective Pressure
v_max = v[1]
v[2] = v[3]
v_min = v[2]
MEP = w_net/(v_max-v_min)
from math import log
# Add entropy to the property table
states.add_property('s',units='kJ/kg/K')
s = states.dict['s']
pv = air.pv_diagram()
s[1] = air.s(T=T[1],p=p[1])
for i in [2,3,4,5]:
#s[i] = s[1] + c_p*log(T[i]/T[1]) - R*log(p[i]/p[1])
s[i] = air.s(T=T[i],p=p[i])
# plot each state on the p,v diagram and calculate the entropy at each state
for i in range(1,6):
pv.plot_state(states[i],label_loc='north east')
# plot the process paths
pv.plot_process(states[1],states[2],path='isentropic')
pv.plot_process(states[2],states[3],path='isochoric')
pv.plot_process(states[3],states[4],path='isobaric')
pv.plot_process(states[4],states[5],path='isentropic')
pv.plot_process(states[5],states[1],path='isochoric');
Ts = air.Ts_diagram()
for i in range(1,6):
Ts.plot_state(states[i],label_loc='north east')
Ts.plot_process(states[1],states[2],label='incorrect trend',path='isentropic')
Ts.plot_process(states[2],states[3],path='isochoric')
Ts.plot_process(states[3],states[4],path='isobaric')
Ts.plot_process(states[4],states[5],label='incorrect trend',path='isentropic')
Ts.plot_process(states[5],states[1],path='isochoric');
Summary();
Summary(['r_c','q_in','w_net','eta_th','MEP']);
| 0.507568 | 0.953665 |
# 3. Calculations -- Hydrological cycle
```
'''Import packages for loading data, analysing, and plotting'''
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xesmf as xe
%matplotlib inline
import cartopy
import cartopy.crs as ccrs
import matplotlib
from netCDF4 import Dataset
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy.ma as ma
import math
import xlrd
import os
import matplotlib.colors as colors
import seaborn as sns
import scipy
from sklearn.metrics import mean_squared_error
from matplotlib.projections import PolarAxes
import mpl_toolkits.axisartist.floating_axes as FA
import mpl_toolkits.axisartist.grid_finder as GF
pmip_v4='PMIP4'
pmip_v3='PMIP3'
pmip={}
pmip['PMIP4']=['AWI-CM-1-1-LR',
'CESM2',
'EC-EARTH-3-3',
'FGOALS-f3-L',
'FGOALS-g3',
'GISS-E2-1-G',
'HadGEM3-GC31',
'IPSL-CM6A-LR',
'MIROC-ES2L',
'MPI-ESM1-2-LR',
'MRI-ESM2-0',
'NESM3',
'NorESM1-F',
'NorESM2',
'UofT-CCSM-4']
pmip['PMIP3']=['BCC-CSM1-1',
'CCSM4',
'CNRM-CM5',
'CSIRO-Mk3L-1-2',
'CSIRO-Mk3-6-0',
'EC-EARTH-2-2',
'FGOALS-g2',
'FGOALS-s2',
'GISS-E2-R',
'HadGEM2-CC',
'HadGEM2-ES',
'IPSL-CM5A-LR',
'KCM1-2-2',
'MIROC-ESM',
'MPI-ESM-P',
'MRI-CGCM3']
# No changes needed here
'''Define calculating functions'''
#This function will get all available experiment names
def experimentlist():
exps=[]
file_path = "data"
for dirpaths, dirnames, filenames in os.walk(file_path):
for d in dirnames:
exps.append(d)
return exps
#This function will get all available model names in the experiment
def modellist(experiment_name):
models=[]
file_path = "data/%s" %(experiment_name)
for dirpaths, dirnames, filenames in os.walk(file_path):
for f in filenames:
mname=f.split("_")[0]
models.append(mname)
return models
#This function will get all available filenames in the experiment
def filenamelist(experiment_name):
filenames=[]
file_path = "data/%s" %(experiment_name)
for dirpaths, dirnames, files in os.walk(file_path):
for f in files:
ff='data/%s/%s'%(experiment_name,f)
filenames.append(ff)
return filenames
#This function will identify models in the ensemble
def identify_ensemble_members(variable_name,experiment_name):
datadir="data/%s" %(experiment_name)
ensemble_members=!scripts/find_experiment_ensemble_members.bash {experiment_name} {variable_name} {datadir}
return ensemble_members
#This function will list the exact model name
def extract_model_name(filename):
file_no_path=filename.rpartition("/")
file_strings=file_no_path[2].partition("_")
model_name=file_strings[0]
return model_name
def ensemble_members_dict(variable_name,experiment_name):
ens_mems=identify_ensemble_members(variable_name,experiment_name)
ens_mems_dict={extract_model_name(ens_mems[0]):ens_mems[0]}
for mem in ens_mems[1:]:
ens_mems_dict[extract_model_name(mem)]=mem
return ens_mems_dict
#This function will calculate the ensemble average
def ensemble_mean(pmip_v):
n=0
average=0
grid_1x1= xr.Dataset({'lat': (['lat'], np.arange(-89.5, 90., 1.0)),
'lon': (['lon'], np.arange(-0, 360, 1.0))})
gcm_dict=ensemble_members_dict(variable_name,experiment_name)
for gcm in gcm_dict:
if gcm in pmip[pmip_v]:
this_file=xr.open_dataset(gcm_dict.get(gcm),decode_times=False)
this_var=this_file[variable_name]
this_regridder=xe.Regridder(this_file,grid_1x1,'bilinear', reuse_weights=True,periodic=True)
var_1x1=this_regridder(this_var)
average=(n*average+var_1x1)/(n+1)
n=n+1
ensemble_ave_r=np.zeros((180,360))
for r in range(180):
for c in range(360):
ensemble_ave_r[r][c]=average[r][c-180]
return ensemble_ave_r
#This function will calculate the difference between experiment and piControl for each model,
#and then calculate the ensemble average of the differences
def ensemble_mean_diffence(pmip_v,experiment_name,variable_name):
model_list=[]
n=0
average=0
A_dict=ensemble_members_dict(variable_name,experiment_name)
B_dict=ensemble_members_dict(variable_name,'piControl')
grid_1x1= xr.Dataset({'lat': (['lat'], np.arange(-89.5, 90., 1.0)),
'lon': (['lon'], np.arange(0, 360., 1.0))})
for gcm in A_dict:
if gcm in B_dict:
if gcm in pmip[pmip_v]:
model_list.append(gcm)
expt_a_file=xr.open_dataset(A_dict.get(gcm),decode_times=False)
expt_a=expt_a_file[variable_name]
expt_b_file=xr.open_dataset(B_dict.get(gcm),decode_times=False)
expt_b=expt_b_file[variable_name]
diff=expt_a-expt_b
this_regridder=xe.Regridder(expt_a_file,grid_1x1,'bilinear', reuse_weights=True,periodic=True)
diff_1x1=this_regridder(diff)
average=(n*average+diff_1x1)/(n+1)
n=n+1
ensemble_diff_r=np.zeros((180,360))
for r in range(180):
for c in range(360):
ensemble_diff_r[r][c]=average[r][c-180]
f3='model_lists/%s_%s_%s_ave_modellist.csv' %(experiment_name,variable_name,pmip_v)
modellist=pd.DataFrame(model_list)
modellist.to_csv(f3)
return ensemble_diff_r
#This function will calculate the difference between experiment and piControl for each model,
#and then calculate the ensemble stddev of the differences
def ensemble_stddev(pmip_v,experiment_name,variable_name):
model_list=[]
dataset=[]
A_dict=ensemble_members_dict(variable_name,experiment_name)
B_dict=ensemble_members_dict(variable_name,'piControl')
grid_1x1= xr.Dataset({'lat': (['lat'], np.arange(-89.5, 90., 1.0)),
'lon': (['lon'], np.arange(0, 360., 1.0))})
for gcm in A_dict:
if gcm in B_dict:
if gcm in pmip[pmip_v]:
model_list.append(gcm)
expt_a_file=xr.open_dataset(A_dict.get(gcm),decode_times=False)
expt_a=expt_a_file[variable_name]
expt_b_file=xr.open_dataset(B_dict.get(gcm),decode_times=False)
expt_b=expt_b_file[variable_name]
diff=expt_a-expt_b
this_regridder=xe.Regridder(expt_a_file,grid_1x1,'bilinear', reuse_weights=True,periodic=True)
diff_1x1=this_regridder(diff)
dataset.append(diff_1x1)
data=np.array(dataset)
std=np.std(data,axis=0)
stddev_diff_r=np.zeros((180,360))
for r in range(180):
for c in range(360):
stddev_diff_r[r][c]=std[r][c-180]
f3='model_lists/%s_%s_%s_std_modellist.csv' %(experiment_name,variable_name,pmip_v)
modellist=pd.DataFrame(model_list)
modellist.to_csv(f3)
return stddev_diff_r
#This fuction will plot Robinson projected Geo2D map for averaged precipitation rate in mm/day
def pr_ave_plot(data4,data3,data_diff,experiment_name,variable_name):
cmap=plt.get_cmap('BrBG')
projection = ccrs.Robinson()
transform=ccrs.PlateCarree()
clim=[-1,1]
bounds = np.linspace(-1, 1, 11)
norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)
fig, axs = plt.subplots(nrows=3,ncols=1,figsize=(10, 18), subplot_kw={'projection': ccrs.Robinson()})
ax1=axs[0]
ax2=axs[1]
ax3=axs[2]
ax1.set_title('PMIP4/CMIP6 ')
ax1.coastlines()
ax1.gridlines()
fig1=ax1.imshow(data4, transform=transform,cmap=cmap,clim=clim,norm=norm)
ax2.set_title('PMIP3/CMIP5')
ax2.coastlines()
ax2.gridlines()
fig2=ax2.imshow(data3, transform=transform,cmap=cmap,clim=clim,norm=norm)
ax3.set_title('PMIP4-PMIP3')
ax3.coastlines()
ax3.gridlines()
fig3=ax3.imshow(data_diff, transform=transform,cmap=cmap,clim=clim,norm=norm)
cax,kw = matplotlib.colorbar.make_axes([ax for ax in axs.flat],location='bottom',pad=0.05,shrink=0.5)
plt.colorbar(fig3, cax=cax, **kw,extend='both')
figname='figs/%s_%s_ave.png' %(experiment_name,variable_name)
plt.savefig(figname)
#Same as above but for uncertainty, i.e. stddev
def pr_std_plot(data4,data3,experiment_name,variable_name):
cmap=plt.get_cmap('YlGn')
clim=[0,1.5]
bounds = np.linspace(0, 1.5, 11)
norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)
fig, axs = plt.subplots(nrows=2,ncols=1,figsize=(10, 12), subplot_kw={'projection': ccrs.Robinson()})
ax1=axs[0]
ax2=axs[1]
title= 'PMIP4'
ax1.set_title(title)
ax1.coastlines()
ax1.gridlines()
fig1=ax1.imshow(data4, transform=ccrs.PlateCarree(),cmap=cmap,clim=clim,norm=norm)
title= 'PMIP3'
ax2.set_title(title)
ax2.coastlines()
ax2.gridlines()
fig2=ax2.imshow(data3, transform=ccrs.PlateCarree(),cmap=cmap,clim=clim,norm=norm)
cax,kw = matplotlib.colorbar.make_axes([ax for ax in axs.flat],location='bottom',pad=0.05,shrink=0.5)
plt.colorbar(fig2, cax=cax, **kw,extend='max')
figname='figs/%s_%s_std.png' %(experiment_name,variable_name)
plt.savefig(figname)
```
# DJF precip
```
experiment_name='midHolocene-cal-adj'
variable_name='pr_spatialmean_djf'
djfpr_ensemble_ave_v3=ensemble_mean_diffence(pmip_v3,experiment_name,variable_name)
djfpr_ensemble_std_v3=ensemble_stddev(pmip_v3,experiment_name,variable_name)
#PMIP4
djfpr_ensemble_ave_v4=ensemble_mean_diffence(pmip_v4,experiment_name,variable_name)
djfpr_ensemble_std_v4=ensemble_stddev(pmip_v4,experiment_name,variable_name)
#diff
djfpr_ensemble_diff=djfpr_ensemble_ave_v4-djfpr_ensemble_ave_v3
pr_ave_plot(djfpr_ensemble_ave_v4,djfpr_ensemble_ave_v3,djfpr_ensemble_diff,experiment_name,variable_name)
pr_std_plot(djfpr_ensemble_std_v4,djfpr_ensemble_std_v3,experiment_name,variable_name)
d=Dataset('plotting_data/PMIP4_MH_Ensembles_pr_djf.nc','a')
d.variables['pr_djf_ave_v4'][:]=djfpr_ensemble_ave_v4
d.variables['pr_djf_std_v4'][:]=djfpr_ensemble_std_v4
d.variables['pr_djf_ave_v3'][:]=djfpr_ensemble_ave_v3
d.variables['pr_djf_std_v3'][:]=djfpr_ensemble_std_v3
d.variables['pr_djf_ave_diff'][:]=djfpr_ensemble_diff
d.close()
```
# JJA precip
```
experiment_name='midHolocene-cal-adj'
variable_name='pr_spatialmean_jja'
jjapr_ensemble_ave_v3=ensemble_mean_diffence(pmip_v3,experiment_name,variable_name)
jjapr_ensemble_std_v3=ensemble_stddev(pmip_v3,experiment_name,variable_name)
#PMIP4
jjapr_ensemble_ave_v4=ensemble_mean_diffence(pmip_v4,experiment_name,variable_name)
jjapr_ensemble_std_v4=ensemble_stddev(pmip_v4,experiment_name,variable_name)
#diff
jjapr_ensemble_diff=jjapr_ensemble_ave_v4-jjapr_ensemble_ave_v3
pr_ave_plot(jjapr_ensemble_ave_v4,jjapr_ensemble_ave_v3,jjapr_ensemble_diff,experiment_name,variable_name)
pr_std_plot(jjapr_ensemble_std_v4,jjapr_ensemble_std_v3,experiment_name,variable_name)
d=Dataset('plotting_data/PMIP4_MH_Ensembles_pr_jja.nc','a')
d.variables['pr_jja_ave_v4'][:]=jjapr_ensemble_ave_v4
d.variables['pr_jja_std_v4'][:]=jjapr_ensemble_std_v4
d.variables['pr_jja_ave_v3'][:]=jjapr_ensemble_ave_v3
d.variables['pr_jja_std_v3'][:]=jjapr_ensemble_std_v3
d.variables['pr_jja_ave_diff'][:]=jjapr_ensemble_diff
d.close()
```
|
github_jupyter
|
'''Import packages for loading data, analysing, and plotting'''
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xesmf as xe
%matplotlib inline
import cartopy
import cartopy.crs as ccrs
import matplotlib
from netCDF4 import Dataset
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy.ma as ma
import math
import xlrd
import os
import matplotlib.colors as colors
import seaborn as sns
import scipy
from sklearn.metrics import mean_squared_error
from matplotlib.projections import PolarAxes
import mpl_toolkits.axisartist.floating_axes as FA
import mpl_toolkits.axisartist.grid_finder as GF
pmip_v4='PMIP4'
pmip_v3='PMIP3'
pmip={}
pmip['PMIP4']=['AWI-CM-1-1-LR',
'CESM2',
'EC-EARTH-3-3',
'FGOALS-f3-L',
'FGOALS-g3',
'GISS-E2-1-G',
'HadGEM3-GC31',
'IPSL-CM6A-LR',
'MIROC-ES2L',
'MPI-ESM1-2-LR',
'MRI-ESM2-0',
'NESM3',
'NorESM1-F',
'NorESM2',
'UofT-CCSM-4']
pmip['PMIP3']=['BCC-CSM1-1',
'CCSM4',
'CNRM-CM5',
'CSIRO-Mk3L-1-2',
'CSIRO-Mk3-6-0',
'EC-EARTH-2-2',
'FGOALS-g2',
'FGOALS-s2',
'GISS-E2-R',
'HadGEM2-CC',
'HadGEM2-ES',
'IPSL-CM5A-LR',
'KCM1-2-2',
'MIROC-ESM',
'MPI-ESM-P',
'MRI-CGCM3']
#No change needed here
'''Define calculating functions'''
#This function will get all available experiment names
def experimentlist():
exps=[]
file_path = "data"
for dirpaths, dirnames, filenames in os.walk(file_path):
for d in dirnames:
exps.append(d)
return exps
#This function will get all available model names in the experiment
def modellist(experiment_name):
models=[]
file_path = "data/%s" %(experiment_name)
for dirpaths, dirnames, filenames in os.walk(file_path):
for f in filenames:
mname=f.split("_")[0]
models.append(mname)
return models
#This function will get all available filenames in the experiment
def filenamelist(experiment_name):
filenames=[]
file_path = "data/%s" %(experiment_name)
for dirpaths, dirnames, files in os.walk(file_path):
for f in files:
ff='data/%s/%s'%(experiment_name,f)
filenames.append(ff)
return filenames
#This function will identify models in the ensemble
def identify_ensemble_members(variable_name,experiment_name):
datadir="data/%s" %(experiment_name)
ensemble_members=!scripts/find_experiment_ensemble_members.bash {experiment_name} {variable_name} {datadir}
return ensemble_members
#This function will list the exact model name
def extract_model_name(filename):
file_no_path=filename.rpartition("/")
file_strings=file_no_path[2].partition("_")
model_name=file_strings[0]
return model_name
def ensemble_members_dict(variable_name,experiment_name):
ens_mems=identify_ensemble_members(variable_name,experiment_name)
ens_mems_dict={extract_model_name(ens_mems[0]):ens_mems[0]}
for mem in ens_mems[1:]:
ens_mems_dict[extract_model_name(mem)]=mem
return ens_mems_dict
#This function will calculate the ensemble average
def ensemble_mean(pmip_v):
n=0
average=0
grid_1x1= xr.Dataset({'lat': (['lat'], np.arange(-89.5, 90., 1.0)),
'lon': (['lon'], np.arange(-0, 360, 1.0))})
gcm_dict=ensemble_members_dict(variable_name,experiment_name)
for gcm in gcm_dict:
if gcm in pmip[pmip_v]:
this_file=xr.open_dataset(gcm_dict.get(gcm),decode_times=False)
this_var=this_file[variable_name]
this_regridder=xe.Regridder(this_file,grid_1x1,'bilinear', reuse_weights=True,periodic=True)
var_1x1=this_regridder(this_var)
average=(n*average+var_1x1)/(n+1)
n=n+1
ensemble_ave_r=np.zeros((180,360))
for r in range(180):
for c in range(360):
ensemble_ave_r[r][c]=average[r][c-180]
return ensemble_ave_r
#This function will calculate the difference between experiment and piControl for each model,
#and then calculate the ensemble average of the differences
def ensemble_mean_diffence(pmip_v,experiment_name,variable_name):
model_list=[]
n=0
average=0
A_dict=ensemble_members_dict(variable_name,experiment_name)
B_dict=ensemble_members_dict(variable_name,'piControl')
grid_1x1= xr.Dataset({'lat': (['lat'], np.arange(-89.5, 90., 1.0)),
'lon': (['lon'], np.arange(0, 360., 1.0))})
for gcm in A_dict:
if gcm in B_dict:
if gcm in pmip[pmip_v]:
model_list.append(gcm)
expt_a_file=xr.open_dataset(A_dict.get(gcm),decode_times=False)
expt_a=expt_a_file[variable_name]
expt_b_file=xr.open_dataset(B_dict.get(gcm),decode_times=False)
expt_b=expt_b_file[variable_name]
diff=expt_a-expt_b
this_regridder=xe.Regridder(expt_a_file,grid_1x1,'bilinear', reuse_weights=True,periodic=True)
diff_1x1=this_regridder(diff)
average=(n*average+diff_1x1)/(n+1)
n=n+1
ensemble_diff_r=np.zeros((180,360))
for r in range(180):
for c in range(360):
ensemble_diff_r[r][c]=average[r][c-180]
f3='model_lists/%s_%s_%s_ave_modellist.csv' %(experiment_name,variable_name,pmip_v)
modellist=pd.DataFrame(model_list)
modellist.to_csv(f3)
return ensemble_diff_r
#This function will calculate the difference between experiment and piControl for each model,
#and then calculate the ensemble stddev of the differences
def ensemble_stddev(pmip_v,experiment_name,variable_name):
model_list=[]
dataset=[]
A_dict=ensemble_members_dict(variable_name,experiment_name)
B_dict=ensemble_members_dict(variable_name,'piControl')
grid_1x1= xr.Dataset({'lat': (['lat'], np.arange(-89.5, 90., 1.0)),
'lon': (['lon'], np.arange(0, 360., 1.0))})
for gcm in A_dict:
if gcm in B_dict:
if gcm in pmip[pmip_v]:
model_list.append(gcm)
expt_a_file=xr.open_dataset(A_dict.get(gcm),decode_times=False)
expt_a=expt_a_file[variable_name]
expt_b_file=xr.open_dataset(B_dict.get(gcm),decode_times=False)
expt_b=expt_b_file[variable_name]
diff=expt_a-expt_b
this_regridder=xe.Regridder(expt_a_file,grid_1x1,'bilinear', reuse_weights=True,periodic=True)
diff_1x1=this_regridder(diff)
dataset.append(diff_1x1)
data=np.array(dataset)
std=np.std(data,axis=0)
stddev_diff_r=np.zeros((180,360))
for r in range(180):
for c in range(360):
stddev_diff_r[r][c]=std[r][c-180]
f3='model_lists/%s_%s_%s_std_modellist.csv' %(experiment_name,variable_name,pmip_v)
modellist=pd.DataFrame(model_list)
modellist.to_csv(f3)
return stddev_diff_r
#This function will plot a Robinson-projected Geo2D map of averaged precipitation rate in mm/day
def pr_ave_plot(data4,data3,data_diff,experiment_name,variable_name):
cmap=plt.get_cmap('BrBG')
projection = ccrs.Robinson()
transform=ccrs.PlateCarree()
clim=[-1,1]
bounds = np.linspace(-1, 1, 11)
norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)
fig, axs = plt.subplots(nrows=3,ncols=1,figsize=(10, 18), subplot_kw={'projection': ccrs.Robinson()})
ax1=axs[0]
ax2=axs[1]
ax3=axs[2]
ax1.set_title('PMIP4/CMIP6 ')
ax1.coastlines()
ax1.gridlines()
fig1=ax1.imshow(data4, transform=transform,cmap=cmap,clim=clim,norm=norm)
ax2.set_title('PMIP3/CMIP5')
ax2.coastlines()
ax2.gridlines()
fig2=ax2.imshow(data3, transform=transform,cmap=cmap,clim=clim,norm=norm)
ax3.set_title('PMIP4-PMIP3')
ax3.coastlines()
ax3.gridlines()
fig3=ax3.imshow(data_diff, transform=transform,cmap=cmap,clim=clim,norm=norm)
cax,kw = matplotlib.colorbar.make_axes([ax for ax in axs.flat],location='bottom',pad=0.05,shrink=0.5)
plt.colorbar(fig3, cax=cax, **kw,extend='both')
figname='figs/%s_%s_ave.png' %(experiment_name,variable_name)
plt.savefig(figname)
#Same as above but for uncertainty, i.e. stddev
def pr_std_plot(data4,data3,experiment_name,variable_name):
cmap=plt.get_cmap('YlGn')
clim=[0,1.5]
bounds = np.linspace(0, 1.5, 11)
norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)
fig, axs = plt.subplots(nrows=2,ncols=1,figsize=(10, 12), subplot_kw={'projection': ccrs.Robinson()})
ax1=axs[0]
ax2=axs[1]
title= 'PMIP4'
ax1.set_title(title)
ax1.coastlines()
ax1.gridlines()
fig1=ax1.imshow(data4, transform=ccrs.PlateCarree(),cmap=cmap,clim=clim,norm=norm)
title= 'PMIP3'
ax2.set_title(title)
ax2.coastlines()
ax2.gridlines()
fig2=ax2.imshow(data3, transform=ccrs.PlateCarree(),cmap=cmap,clim=clim,norm=norm)
cax,kw = matplotlib.colorbar.make_axes([ax for ax in axs.flat],location='bottom',pad=0.05,shrink=0.5)
plt.colorbar(fig2, cax=cax, **kw,extend='max')
figname='figs/%s_%s_std.png' %(experiment_name,variable_name)
plt.savefig(figname)
experiment_name='midHolocene-cal-adj'
variable_name='pr_spatialmean_djf'
djfpr_ensemble_ave_v3=ensemble_mean_diffence(pmip_v3,experiment_name,variable_name)
djfpr_ensemble_std_v3=ensemble_stddev(pmip_v3,experiment_name,variable_name)
#PMIP4
djfpr_ensemble_ave_v4=ensemble_mean_diffence(pmip_v4,experiment_name,variable_name)
djfpr_ensemble_std_v4=ensemble_stddev(pmip_v4,experiment_name,variable_name)
#diff
djfpr_ensemble_diff=djfpr_ensemble_ave_v4-djfpr_ensemble_ave_v3
pr_ave_plot(djfpr_ensemble_ave_v4,djfpr_ensemble_ave_v3,djfpr_ensemble_diff,experiment_name,variable_name)
pr_std_plot(djfpr_ensemble_std_v4,djfpr_ensemble_std_v3,experiment_name,variable_name)
d=Dataset('plotting_data/PMIP4_MH_Ensembles_pr_djf.nc','a')
d.variables['pr_djf_ave_v4'][:]=djfpr_ensemble_ave_v4
d.variables['pr_djf_std_v4'][:]=djfpr_ensemble_std_v4
d.variables['pr_djf_ave_v3'][:]=djfpr_ensemble_ave_v3
d.variables['pr_djf_std_v3'][:]=djfpr_ensemble_std_v3
d.variables['pr_djf_ave_diff'][:]=djfpr_ensemble_diff
d.close()
experiment_name='midHolocene-cal-adj'
variable_name='pr_spatialmean_jja'
jjapr_ensemble_ave_v3=ensemble_mean_diffence(pmip_v3,experiment_name,variable_name)
jjapr_ensemble_std_v3=ensemble_stddev(pmip_v3,experiment_name,variable_name)
#PMIP4
jjapr_ensemble_ave_v4=ensemble_mean_diffence(pmip_v4,experiment_name,variable_name)
jjapr_ensemble_std_v4=ensemble_stddev(pmip_v4,experiment_name,variable_name)
#diff
jjapr_ensemble_diff=jjapr_ensemble_ave_v4-jjapr_ensemble_ave_v3
pr_ave_plot(jjapr_ensemble_ave_v4,jjapr_ensemble_ave_v3,jjapr_ensemble_diff,experiment_name,variable_name)
pr_std_plot(jjapr_ensemble_std_v4,jjapr_ensemble_std_v3,experiment_name,variable_name)
d=Dataset('plotting_data/PMIP4_MH_Ensembles_pr_jja.nc','a')
d.variables['pr_jja_ave_v4'][:]=jjapr_ensemble_ave_v4
d.variables['pr_jja_std_v4'][:]=jjapr_ensemble_std_v4
d.variables['pr_jja_ave_v3'][:]=jjapr_ensemble_ave_v3
d.variables['pr_jja_std_v3'][:]=jjapr_ensemble_std_v3
d.variables['pr_jja_ave_diff'][:]=jjapr_ensemble_diff
d.close()
| 0.31384 | 0.705252 |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from functools import reduce
df = pd.read_csv(os.path.join('..', 'data.csv'), sep=',')
df.head()
inputRotations = ['inputR1', 'inputR2', 'inputR3']
inputTranslations = ['inputT1', 'inputT2', 'inputT3']
inputs = inputRotations + inputTranslations
outputRotations = ['outputR1', 'outputR2', 'outputR3']
outputTranslations = ['outputT1', 'outputT2', 'outputT3']
outputs = outputRotations + outputTranslations
inRot = [df[col] for col in inputRotations]
inTrans = [df[col] for col in inputTranslations]
inAny = inRot + inTrans
outRot = [df[col] for col in outputRotations]
outTrans = [df[col] for col in outputTranslations]
outAny = outRot + outTrans
```
# Total Number of Output Rotations depending on ...
```
totalInputs = reduce((lambda x, y: x + y), inAny)
totalOutputRotations = reduce((lambda x, y: x + y), outRot)
plt.plot(totalInputs, totalOutputRotations, 'ro')
plt.xlabel('totalInputs')
plt.ylabel('totalOutputRotations')
plt.show()
totalInputTranslations = reduce((lambda x, y: x + y), inTrans)
totalOutputRotations = reduce((lambda x, y: x + y), outRot)
plt.plot(totalInputTranslations, totalOutputRotations, 'ro')
plt.xlabel('totalInputTranslations')
plt.ylabel('totalOutputRotations')
plt.show()
totalInputRotations = reduce((lambda x, y: x + y), inRot)
totalOutputRotations = reduce((lambda x, y: x + y), outRot)
plt.plot(totalInputRotations, totalOutputRotations, 'ro')
plt.xlabel('totalInputRotations')
plt.ylabel('totalOutputRotations')
plt.show()
```
# Total Number of Output Translations depending on ...
```
totalInputs = reduce((lambda x, y: x + y), inAny)
totalOutputTranslations = reduce((lambda x, y: x + y), outTrans)
plt.plot(totalInputs, totalOutputTranslations, 'ro')
plt.xlabel('totalInputs')
plt.ylabel('totalOutputTranslation')
plt.show()
totalInputTranslations = reduce((lambda x, y: x + y), inTrans)
totalOutputTranslations = reduce((lambda x, y: x + y), outTrans)
plt.plot(totalInputTranslations, totalOutputTranslations, 'ro')
plt.xlabel('totalInputTranslations')
plt.ylabel('totalOutputTranslation')
plt.show()
totalInputRotations = reduce((lambda x, y: x + y), inRot)
totalOutputTranslations = reduce((lambda x, y: x + y), outTrans)
plt.plot(totalInputRotations, totalOutputTranslations, 'ro')
plt.xlabel('totalInputRotations')
plt.ylabel('totalOutputTranslation')
plt.show()
```
# Total Number of Outputs depending on ...
```
totalInputs = reduce((lambda x, y: x + y), inAny)
totalOutputs = reduce((lambda x, y: x + y), outAny)
plt.plot(totalInputs, totalOutputs, 'ro')
plt.xlabel('totalInputs')
plt.ylabel('totalOutputs')
plt.show()
totalInputTranslations = reduce((lambda x, y: x + y), inTrans)
totalOutputs = reduce((lambda x, y: x + y), outAny)
plt.plot(totalInputTranslations, totalOutputs, 'ro')
plt.xlabel('totalInputTranslations')
plt.ylabel('totalOutputs')
plt.show()
totalInputRotations = reduce((lambda x, y: x + y), inRot)
totalOutputs = reduce((lambda x, y: x + y), outAny)
plt.plot(totalInputRotations, totalOutputs, 'ro')
plt.xlabel('totalInputRotations')
plt.ylabel('totalOutputs')
plt.show()
```
# Misc.
```
totalOutputs = reduce((lambda x, y: x + y), outAny).sort_values()
plt.plot(df['id'], totalOutputs, 'ro')
plt.xlabel('Mechanism ID')
plt.ylabel('totalOutputs')
plt.show()
totalInputs = reduce((lambda x, y: x + y), inAny).sort_values()
plt.plot(df['id'], totalInputs, 'ro')
plt.xlabel('Mechanism ID')
plt.ylabel('totalInputs')
plt.show()
#inRot
commonRotations = [a * b for a,b in zip(inRot, outRot)]
commonRotations
#commonRotations
plt.plot(["x", "y", "z"], commonRotations, 'ro')
plt.xlabel('rotation axis')
plt.ylabel('input and output rotation in common')
plt.show()
plt.plot(df['transmission'].sort_values(), df['id'], 'ro')
plt.xlabel('Transmission')
plt.ylabel('Mechanism ID')
plt.show()
plt.plot(df['id'], df['inputR1'], 'ro')
plt.xlabel('Mechanism ID')
plt.ylabel('Input Rotation on X axis')
plt.show()
```
# Current Dev
|
github_jupyter
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from functools import reduce
df = pd.read_csv(os.path.join('..', 'data.csv'), sep=',')
df.head()
inputRotations = ['inputR1', 'inputR2', 'inputR3']
inputTranslations = ['inputT1', 'inputT2', 'inputT3']
inputs = inputRotations + inputTranslations
outputRotations = ['outputR1', 'outputR2', 'outputR3']
outputTranslations = ['outputT1', 'outputT2', 'outputT3']
outputs = outputRotations + outputTranslations
inRot = [df[col] for col in inputRotations]
inTrans = [df[col] for col in inputTranslations]
inAny = inRot + inTrans
outRot = [df[col] for col in outputRotations]
outTrans = [df[col] for col in outputTranslations]
outAny = outRot + outTrans
totalInputs = reduce((lambda x, y: x + y), inAny)
totalOutputRotations = reduce((lambda x, y: x + y), outRot)
plt.plot(totalInputs, totalOutputRotations, 'ro')
plt.xlabel('totalInputs')
plt.ylabel('totalOutputRotations')
plt.show()
totalInputTranslations = reduce((lambda x, y: x + y), inTrans)
totalOutputRotations = reduce((lambda x, y: x + y), outRot)
plt.plot(totalInputTranslations, totalOutputRotations, 'ro')
plt.xlabel('totalInputTranslations')
plt.ylabel('totalOutputRotations')
plt.show()
totalInputRotations = reduce((lambda x, y: x + y), inRot)
totalOutputRotations = reduce((lambda x, y: x + y), outRot)
plt.plot(totalInputRotations, totalOutputRotations, 'ro')
plt.xlabel('totalInputRotations')
plt.ylabel('totalOutputRotations')
plt.show()
totalInputs = reduce((lambda x, y: x + y), inAny)
totalOutputTranslations = reduce((lambda x, y: x + y), outTrans)
plt.plot(totalInputs, totalOutputTranslations, 'ro')
plt.xlabel('totalInputs')
plt.ylabel('totalOutputTranslation')
plt.show()
totalInputTranslations = reduce((lambda x, y: x + y), inTrans)
totalOutputTranslations = reduce((lambda x, y: x + y), outTrans)
plt.plot(totalInputTranslations, totalOutputTranslations, 'ro')
plt.xlabel('totalInputTranslations')
plt.ylabel('totalOutputTranslation')
plt.show()
totalInputRotations = reduce((lambda x, y: x + y), inRot)
totalOutputTranslations = reduce((lambda x, y: x + y), outTrans)
plt.plot(totalInputRotations, totalOutputTranslations, 'ro')
plt.xlabel('totalInputRotations')
plt.ylabel('totalOutputTranslation')
plt.show()
totalInputs = reduce((lambda x, y: x + y), inAny)
totalOutputs = reduce((lambda x, y: x + y), outAny)
plt.plot(totalInputs, totalOutputs, 'ro')
plt.xlabel('totalInputs')
plt.ylabel('totalOutputs')
plt.show()
totalInputTranslations = reduce((lambda x, y: x + y), inTrans)
totalOutputs = reduce((lambda x, y: x + y), outAny)
plt.plot(totalInputTranslations, totalOutputs, 'ro')
plt.xlabel('totalInputTranslations')
plt.ylabel('totalOutputs')
plt.show()
totalInputRotations = reduce((lambda x, y: x + y), inRot)
totalOutputs = reduce((lambda x, y: x + y), outAny)
plt.plot(totalInputRotations, totalOutputs, 'ro')
plt.xlabel('totalInputRotations')
plt.ylabel('totalOutputs')
plt.show()
totalOutputs = reduce((lambda x, y: x + y), outAny).sort_values()
plt.plot(df['id'], totalOutputs, 'ro')
plt.xlabel('Mechanism ID')
plt.ylabel('totalOutputs')
plt.show()
totalInputs = reduce((lambda x, y: x + y), inAny).sort_values()
plt.plot(df['id'], totalInputs, 'ro')
plt.xlabel('Mechanism ID')
plt.ylabel('totalInputs')
plt.show()
#inRot
commonRotations = [a * b for a,b in zip(inRot, outRot)]
commonRotations
#commonRotations
plt.plot(["x", "y", "z"], commonRotations, 'ro')
plt.xlabel('rotation axis')
plt.ylabel('input and output rotation in common')
plt.show()
plt.plot(df['transmission'].sort_values(), df['id'], 'ro')
plt.xlabel('Transmission')
plt.ylabel('Mechanism ID')
plt.show()
plt.plot(df['id'], df['inputR1'], 'ro')
plt.xlabel('Mechanism ID')
plt.ylabel('Input Rotation on X axis')
plt.show()
| 0.467089 | 0.726911 |
# Quiz 2
For Penn State students, access the quiz [here](https://psu.instructure.com/courses/2177217/quizzes/4421196)
```
import ipywidgets as widgets
```
## Question 1
Consider $f(x,y)=e^{x^2+y^2}$. Compute the Hessian matrix and determine whether $f(x,y)$ is a convex function.
```{dropdown} Show answer
Answer: Hessian matrix is
$$
e^{x^2+y^2}
\begin{bmatrix}
4x^2+2&4xy\\
4xy&4y^2+2
\end{bmatrix}
$$
$f(x,y)$ is not a convex function
```
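A quick symbolic check with sympy (an illustrative sketch, assuming sympy is available) confirms the Hessian above and that its leading principal minors are positive, i.e. the Hessian is positive definite:
```
import sympy as sp

x, y = sp.symbols('x y', real=True)
f = sp.exp(x**2 + y**2)
H = sp.hessian(f, (x, y))
print(sp.simplify(H))        # matches the matrix shown above
print(sp.simplify(H.det()))  # (8*x**2 + 8*y**2 + 4)*exp(2*x**2 + 2*y**2) > 0
print(sp.simplify(H[0, 0]))  # (4*x**2 + 2)*exp(x**2 + y**2) > 0
```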
## Question 2
Given any $w\in R^n,\:b\in R,$ consider the multivariable function $f(\boldsymbol x)=e^{\boldsymbol w\cdot \boldsymbol x+b}$. Is $f(\boldsymbol x)$ a convex function?
```{dropdown} Show answer
Answer: Yes
```
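One way to see this: $f$ is the composition of the convex, nondecreasing function $t\mapsto e^t$ with the affine map $\boldsymbol x\mapsto \boldsymbol w\cdot \boldsymbol x+b$, and such compositions are convex.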
## Question 3
Consider $f(x,y)=x^2.$ Is $f(x,y)$ a $\lambda$-strongly convex function?
```{dropdown} Show answer
Answer: No
```
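To see why: viewed as a function of $(x,y)$, the Hessian is

$$
\nabla^2 f=
\begin{bmatrix}
2&0\\
0&0
\end{bmatrix},
$$

whose smallest eigenvalue is $0$, so no $\lambda>0$ satisfies $\nabla^2 f \succeq \lambda I$.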
## Question 4
Consider
$$
f\left(
\begin{matrix}
x\\
y
\end{matrix}
\right)=x^2+y^2
$$
Given initial guess
$$
\left(
\begin{matrix}
x^0\\
y^0
\end{matrix}
\right)
=
\left(
\begin{matrix}
1\\
2
\end{matrix}
\right), \eta=\frac14
$$
, compute
two steps of the gradient descent method for $f(x,y)$:
$$
\left(
\begin{matrix}
x^{k+1}\\
y^{k+1}
\end{matrix}
\right)
=
\left(
\begin{matrix}
x^k\\
y^k
\end{matrix}
\right)
-
\eta \nabla f\left(
\begin{matrix}
x^k\\
y^k
\end{matrix}
\right), k=0, 1.
$$
```{dropdown} Show answer
Answer: $\frac{1}{4},\frac{1}{2}$
```
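The two steps can also be checked numerically, e.g. with numpy (illustrative sketch):
```
import numpy as np

eta = 0.25
p = np.array([1.0, 2.0])   # (x0, y0)
for k in range(2):
    grad = 2 * p           # gradient of x^2 + y^2
    p = p - eta * grad
print(p)                   # [0.25 0.5 ]
```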
## Question 5
Suppose a point $x$ is drawn at random uniformly from the square $[-1,1]\times[-1,1].$ Let
$$
\boldsymbol v= \left(
\begin{matrix}
1\\
1
\end{matrix}
\right)
$$
and consider the random variable $\mathcal X_{\boldsymbol v} ={\boldsymbol x} \cdot {\boldsymbol v}$. What are $\mathbb{E} [\mathcal X_{\boldsymbol v}]$ and $\big(\mathbb{V}[ \mathcal X_{\boldsymbol v}]\big)^2$.
```{dropdown} Show answer
Answer: Unavailable
```
## Question 6
```
def model():
    return nn.Linear(100,10)
```
What are the sizes of W and b of the model?
```{dropdown} Show answer
Answer:
Size of W: torch.Size([10, 100]), Size of b: torch.Size([10])
```
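A quick check (illustrative, assuming PyTorch is installed):
```
import torch.nn as nn

layer = nn.Linear(100, 10)
print(layer.weight.shape)  # torch.Size([10, 100])
print(layer.bias.shape)    # torch.Size([10])
```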
## Question 7
Load MNIST dataset with batch_size=100 as follows
```
trainset = torchvision.datasets.MNIST(root='./data', train= True, download=True,transform=torchvision.transforms.ToTensor())
trainloader = torch.utils.data.DataLoader(trainset, batch_size=100, shuffle=True)
for i, (images, labels) in enumerate(trainloader):
```
What are the sizes of variable images and labels?
```{dropdown} Show answer
Answer: Size of images: torch.Size([100, 1, 28, 28]), Size of labels: torch.Size([100])
```
## Question 8
In the training process of the MNIST dataset with the mini-batch stochastic gradient descent (SGD) method, if we set batch_size = 600, how many iterations (or SGD steps) are there in one epoch?
```{dropdown} Show answer
Answer: 100
```
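The MNIST training split contains 60,000 images, so one epoch takes $60000/600=100$ mini-batch SGD iterations.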
## Question 9
What is the output of the following code?
```
sequence = torch.tensor(([[4,2,3],[1,5,6],[0,7,2]]))
maxvalue, index = torch.max(sequence, 1)
print(maxvalue,',',index)
```
```{dropdown} Show answer
Answer: tensor([4, 6, 7]) , tensor([0, 2, 1])
```
## Question 10
What is the output of the following code?
```
num_correct = 0
labels = torch.tensor([1,2,3,4,0,0,0])
predicted = torch.tensor([0,2,3,4,1,2,0])
num_correct += (predicted == labels).sum()
print(num_correct)
```
```{dropdown} Show answer
Answer: tensor(4)
```
|
github_jupyter
|
import ipywidgets as widgets
## Question 2
Given any $w\in R^n,\:b\in R,$ consider the multivariable function $f(\boldsymbol x)=e^{\boldsymbol w\cdot \boldsymbol x+b}$. Is $f(\boldsymbol x)$ a convex function?
## Question 3
Consider $f(x,y)=x^2.$ Is $f(x,y)$ a $\lambda$-strongly convex function?
## Question 4
Consider
$$
f\left(
\begin{matrix}
x\\
y
\end{matrix}
\right)=x^2+y^2
$$
Given initial guess
$$
\left(
\begin{matrix}
x^0\\
y^0
\end{matrix}
\right)
=
\left(
\begin{matrix}
1\\
2
\end{matrix}
\right), \eta=\frac14
$$
, compute
two steps of the gradient descent method for $f(x,y)$:
$$
\left(
\begin{matrix}
x^{k+1}\\
y^{k+1}
\end{matrix}
\right)
=
\left(
\begin{matrix}
x^k\\
y^k
\end{matrix}
\right)
-
\eta \nabla f\left(
\begin{matrix}
x^k\\
y^k
\end{matrix}
\right), k=0, 1.
$$
## Question 5
Suppose a point $x$ is drawn at random uniformly from the square $[-1,1]\times[-1,1].$ Let
$$
\boldsymbol v= \left(
\begin{matrix}
1\\
1
\end{matrix}
\right)
$$
and consider the random variable $\mathcal X_{\boldsymbol v} ={\boldsymbol x} \cdot {\boldsymbol v}$. What are $\mathbb{E} [\mathcal X_{\boldsymbol v}]$ and $\big(\mathbb{V}[ \mathcal X_{\boldsymbol v}]\big)^2$.
## Question 6
What are the sizes of W and b of the model?
## Question 7
Load MNIST dataset with batch_size=100 as follows
What are the sizes of variable images and labels?
## Question 8
In the training process of the MNIST dataset with the mini-batch stochastic gradient descent (SGD) method, if we set batch_size = 600, how many iterations (or SGD steps) are there in one epoch?
## Question 9
What is the output of the following code?
## Question 10
What is the output of the following code?
| 0.566498 | 0.991456 |
# CS636 Final Project
### group member: Chinghao Sun, Tianqi Xu, Haotian Yin, Chen Ye
### Predict Future Sales
* https://www.kaggle.com/c/competitive-data-science-predict-future-sales/data
Goals
1. We are not competing with other teams on kaggle
2. This project is a playground to practice the knowledge of this class and prepare for the final exam.
3. Group project, 1-4 people per team
4. You can use R or Python
5. Build one prediction model using the ML algorithms of this course
6. Evaluate your prediction model
7. Try different ways to improve your model and show the improvements.
8. Submit code and results in Jupyter and HTML formats on canvas
# Importing all the required libraries at once so that they can systematically be used when required
```
import numpy as np
import pandas as pd
import datetime
import warnings
warnings.simplefilter("ignore")
# ignore all the warning messages but show the errors
sales_train = pd.read_csv('sales_train.csv')
shops = pd.read_csv('shops.csv')
items = pd.read_csv('items.csv')
item_categories = pd.read_csv('item_categories.csv')
test = pd.read_csv('test.csv')
submission = pd.read_csv('sample_submission.csv')
```
# Load all the necessary files for data
```
sales_train.head()
# shop_id with shop
sales_shop = sales_train.merge(shops, on = 'shop_id')
# item_id with items
sales_shop_items = sales_shop.merge(items, on = 'item_id')
sales_shop_items_itmcat = sales_shop_items.merge(item_categories, on ='item_category_id')
sales_train.shape
test.head()
# item_id with items,sales_train
# ID with submission
submission.head()
shops.head()
items.head()
item_categories.head()
df= sales_shop_items_itmcat
df.shape
sales_train.columns
sales_train.head()
```
## Preprocessing data
### Feature selection by analyzing the problem
Based on the test data, the only features the test set provides are shop_id and item_id.
The training set gives two more features. date_block_num is the index of the month, and that information is already contained in the date feature.
item_price is given in the training data but not in the test data. Looking at the training data, we find that item prices decrease over time. Since this is a simple model and it is not straightforward to predict item_cnt by first predicting price, we do not consider the price time series here.
shop_id also maps to shop_name. Because the names are in Russian and we cannot judge the popularity of a shop from its name, we do not use shop names in our model.
```
sales_train.drop(['date_block_num','item_price'], axis=1, inplace=True)
sales_train['date'] = pd.to_datetime(sales_train['date'], dayfirst=True)
sales_train['date'] = sales_train['date'].apply(lambda x: x.strftime('%Y-%m'))
sales_train.columns
sales_train.head()
```
### Grouping dataset “sales_train” by month
Since the forecast for October 2015 is required, each month’s aggregated data is needed.
### Keeping the columns “shop_id” and “item_id”
They are needed to match the forecasts to dataset “test” later.
```
df = sales_train.groupby(['date','shop_id','item_id']).sum()
df_train = df.pivot_table(index=['shop_id','item_id'], columns='date', values='item_cnt_day', fill_value=0)
df_train.reset_index(inplace=True)
df_train.head()
```
## Applying model on data
The data form a time series: we want to predict the value at time step k+1 from the values of the first k time steps.
```
df_test = pd.merge(test, df_train, on=['shop_id','item_id'], how='left')
df_test.drop(['ID', '2013-01'], axis=1, inplace=True)
df_test = df_test.fillna(0)
df_test.head()
```
### We use MultiLinear Regression to deal with the time series
```
from sklearn.linear_model import LinearRegression
from sklearn import neighbors, preprocessing, model_selection
y = df_train['2015-10'].values
X= df_train.drop(['2015-10'], axis = 1)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
regressor = LinearRegression()
regressor.fit(X_train, y_train) #training the algorithm
```
### result and its evaluation
```
y_pred = regressor.predict(X_test)
y_pred
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('R-squared Error:', metrics.r2_score(y_test, y_pred))
```
### How to improve the model?
The first R-squared error is -0.5611, which is poor.
Multiple linear regression is sensitive to outliers.
Removing outliers via z-scores can be a possible solution.
```
from sklearn.preprocessing import StandardScaler
df_train_z=df_train.drop(['shop_id','item_id'],axis=1)
df_train_z.head()
df = StandardScaler().fit_transform(df_train_z.values)
cols = df_train_z.columns
df= pd.DataFrame(df,columns = cols)
df['shop_id']=df_train['shop_id']
df['item_id']=df_train['item_id']
df = df[df.values<=3]
y = df['2015-10'].values
X= df.drop(['2015-10'], axis = 1)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
regressor = LinearRegression()
regressor.fit(X_train, y_train) #training the algorithm
y_pred = regressor.predict(X_test)
# y_pred
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('R-squared Error:', metrics.r2_score(y_test, y_pred))
```
However, the scaling-based (z-score) outlier filter improves the result only modestly.
## Improvement attempt 2: Decision Tree
```
from sklearn.tree import DecisionTreeRegressor
dt = DecisionTreeRegressor()
y = df_train['2015-10'].values
X= df_train.drop(['2015-10'], axis = 1)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
dt.fit(X_train, y_train)
y_pred = dt.predict(X_test)
dt.feature_importances_
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('R-squared Error:', metrics.r2_score(y_test, y_pred))
from sklearn.tree import DecisionTreeRegressor
dt = DecisionTreeRegressor()
y = df['2015-10'].values
X= df.drop(['2015-10'], axis = 1)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
dt.fit(X_train, y_train)
y_pred = dt.predict(X_test)
dt.feature_importances_
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('R-squared Error:', metrics.r2_score(y_test, y_pred))
```
The improvement is more pronounced for the DecisionTreeRegressor model.
Its R-squared improved from 0.1077 to 0.99.
Such a high R-squared raises concerns about overfitting.
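A simple way to check this is to compare the R-squared on the training split with the test split (a sketch that reuses the variables from the cell above); a large gap indicates overfitting:
```
from sklearn import metrics
# Compare R-squared on the training split vs. the held-out test split
print('Train R-squared:', metrics.r2_score(y_train, dt.predict(X_train)))
print('Test R-squared:', metrics.r2_score(y_test, dt.predict(X_test)))
```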
## Improvement attempt 3: Random Forest
```
from sklearn.ensemble import RandomForestRegressor
df = sales_train.groupby(['date','shop_id','item_id']).sum()
df_train = df.pivot_table(index=['shop_id','item_id'], columns='date', values='item_cnt_day', fill_value=0)
df_train.reset_index(inplace=True)
RFR = RandomForestRegressor(n_estimators = 100)
y = df_train['2015-10'].values
X= df_train.drop(['2015-10'], axis = 1)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3,random_state=101)
RFR.fit(X_train, y_train)
y_pred = dt.predict(X_test)
RFR.feature_importances_
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('R-squared Error:', metrics.r2_score(y_test, y_pred))
```
We also considered adding item_category as another feature, but the improvement was not obvious, so we kept our Decision Tree approach.
# Evaluation of Model
Because the R-squared of the DecisionTreeRegressor varies greatly between runs, cross-validation is a better evaluation.
```
seed = 1
kfold = model_selection.KFold(n_splits=10)
model = dt
# 'accuracy' is a classification metric; use R-squared for this regressor
results = model_selection.cross_val_score(model, X, y, cv=kfold, scoring='r2')
print('R-squared - val set: %.4f (%.4f)' % (results.mean(), results.std()))
```
|
github_jupyter
|
import numpy as np
import pandas as pd
import datetime
import warnings
warnings.simplefilter("ignore")
# ignore all the warning messages but show the errors
sales_train = pd.read_csv('sales_train.csv')
shops = pd.read_csv('shops.csv')
items = pd.read_csv('items.csv')
item_categories = pd.read_csv('item_categories.csv')
test = pd.read_csv('test.csv')
submission = pd.read_csv('sample_submission.csv')
sales_train.head()
# shop_id with shop
sales_shop = sales_train.merge(shops, on = 'shop_id')
# item_id with items
sales_shop_items = sales_shop.merge(items, on = 'item_id')
sales_shop_items_itmcat = sales_shop_items.merge(item_categories, on ='item_category_id')
sales_train.shape
test.head()
# item_id with items,sales_train
# ID with submission
submission.head()
shops.head()
items.head()
item_categories.head()
df= sales_shop_items_itmcat
df.shape
sales_train.columns
sales_train.head()
sales_train.drop(['date_block_num','item_price'], axis=1, inplace=True)
sales_train['date'] = pd.to_datetime(sales_train['date'], dayfirst=True)
sales_train['date'] = sales_train['date'].apply(lambda x: x.strftime('%Y-%m'))
sales_train.columns
sales_train.head()
df = sales_train.groupby(['date','shop_id','item_id']).sum()
df_train = df.pivot_table(index=['shop_id','item_id'], columns='date', values='item_cnt_day', fill_value=0)
df_train.reset_index(inplace=True)
df_train.head()
df_test = pd.merge(test, df_train, on=['shop_id','item_id'], how='left')
df_test.drop(['ID', '2013-01'], axis=1, inplace=True)
df_test = df_test.fillna(0)
df_test.head()
from sklearn.linear_model import LinearRegression
from sklearn import neighbors, preprocessing, model_selection
y = df_train['2015-10'].values
X= df_train.drop(['2015-10'], axis = 1)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
regressor = LinearRegression()
regressor.fit(X_train, y_train) #training the algorithm
y_pred = regressor.predict(X_test)
y_pred
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('R-squared Error:', metrics.r2_score(y_test, y_pred))
from sklearn.preprocessing import StandardScaler
df_train_z=df_train.drop(['shop_id','item_id'],axis=1)
df_train_z.head()
df = StandardScaler().fit_transform(df_train_z.values)
cols = df_train_z.columns
df= pd.DataFrame(df,columns = cols)
df['shop_id']=df_train['shop_id']
df['item_id']=df_train['item_id']
df = df[df.values<=3]
y = df['2015-10'].values
X= df.drop(['2015-10'], axis = 1)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
regressor = LinearRegression()
regressor.fit(X_train, y_train) #training the algorithm
y_pred = regressor.predict(X_test)
# y_pred
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('R-squared Error:', metrics.r2_score(y_test, y_pred))
from sklearn.tree import DecisionTreeRegressor
dt = DecisionTreeRegressor()
y = df_train['2015-10'].values
X= df_train.drop(['2015-10'], axis = 1)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
dt.fit(X_train, y_train)
y_pred = dt.predict(X_test)
dt.feature_importances_
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('R-squared Error:', metrics.r2_score(y_test, y_pred))
from sklearn.tree import DecisionTreeRegressor
dt = DecisionTreeRegressor()
y = df['2015-10'].values
X= df.drop(['2015-10'], axis = 1)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
dt.fit(X_train, y_train)
y_pred = dt.predict(X_test)
dt.feature_importances_
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('R-squared Error:', metrics.r2_score(y_test, y_pred))
from sklearn.ensemble import RandomForestRegressor
df = sales_train.groupby(['date','shop_id','item_id']).sum()
df_train = df.pivot_table(index=['shop_id','item_id'], columns='date', values='item_cnt_day', fill_value=0)
df_train.reset_index(inplace=True)
RFR = RandomForestRegressor(n_estimators = 100)
y = df_train['2015-10'].values
X= df_train.drop(['2015-10'], axis = 1)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3,random_state=101)
RFR.fit(X_train, y_train)
y_pred = dt.predict(X_test)
RFR.feature_importances_
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('R-squared Error:', metrics.r2_score(y_test, y_pred))
seed = 1
kfold = model_selection.KFold(n_splits=10)
model = dt
# 'accuracy' is a classification metric; use R-squared for this regressor
results = model_selection.cross_val_score(model, X, y, cv=kfold, scoring='r2')
print('R-squared - val set: %.4f (%.4f)' % (results.mean(), results.std()))
| 0.538255 | 0.826046 |
# DAT257x: Reinforcement Learning Explained
## Lab 6: Function Approximation
### Exercise 6.1: Q-Learning Agent with Linear Function Approximation
```
import numpy as np
import sys
if "../" not in sys.path:
sys.path.append("../")
from lib.envs.simple_rooms import SimpleRoomsEnv
from lib.simulation import Experiment
class Agent(object):
def __init__(self, actions):
self.actions = actions
self.num_actions = len(actions)
def act(self, state):
raise NotImplementedError
class QLearningFAAgent(Agent):
def __init__(self, actions, obs_size, epsilon=0.01, alpha=0.5, gamma=1):
super(QLearningFAAgent, self).__init__(actions)
## TODO 1
## Initialize thetas here
## In addition, initialize the value of epsilon, alpha and gamma
def featureExtractor(self, state, action):
feature = None
        actionindex = np.zeros(self.num_actions, dtype=int)
actionindex[action] = 1
feature = np.concatenate([actionindex[i] * state for i in self.actions])
return feature
def act(self, state):
## epsilon greedy policy
if np.random.random() < self.epsilon:
i = np.random.randint(0,len(self.actions))
else:
#q = [np.sum(self.theta.transpose() * self.featureExtractor(state, a)) for a in self.actions]
## TODO 2
q = 0 # replace 0 with the correct calculation here
if q.count(max(q)) > 1:
best = [i for i in range(len(self.actions)) if q[i] == max(q)]
i = np.random.choice(best)
else:
i = q.index(max(q))
action = self.actions[i]
return action
def learn(self, state1, action1, reward, state2, done):
"""
Q-learning with FA
theta <- theta + alpha * td_delta * f(s,a)
where
td_delta = reward + gamma * max(Q(s') - Q(s,a))
Q(s,a) = thetas * f(s,a)
max(Q(s')) = max( [ thetas * f(s'a) for a in all actions] )
"""
## TODO 3
## Implement the q-learning update here
maxqnew = 0 # replace 0 with the correct calculation
oldv = 0 # replace 0 with the correct calculation
td_target = reward + self.gamma * maxqnew
td_delta = td_target - oldv
self.theta += self.alpha * 0 # replace 0 with the correct calculation
interactive = True
%matplotlib nbagg
env = SimpleRoomsEnv()
agent = QLearningFAAgent(range(env.action_space.n),16)
experiment = Experiment(env, agent)
experiment.run_qlearning(10, interactive)
interactive = False
%matplotlib inline
env = SimpleRoomsEnv()
agent = QLearningFAAgent(range(env.action_space.n),16)
experiment = Experiment(env, agent)
experiment.run_qlearning(50, interactive)
```
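For reference, the TODOs above could be completed along these lines (a sketch written as standalone helpers, not the graded solution; `feature_extractor` stands for the agent's `featureExtractor` method):
```
import numpy as np

def init_theta(obs_size, num_actions):
    # TODO 1: linear FA weights, one block of length obs_size per action
    return np.zeros(obs_size * num_actions)

def q_values(theta, feature_extractor, state, actions):
    # TODO 2: Q(s, a) = theta . f(s, a) for every action
    return [np.sum(theta * feature_extractor(state, a)) for a in actions]

def q_learning_update(theta, feature_extractor, actions, alpha, gamma,
                      state1, action1, reward, state2):
    # TODO 3: theta <- theta + alpha * td_delta * f(s, a)
    maxqnew = max(q_values(theta, feature_extractor, state2, actions))
    oldv = np.sum(theta * feature_extractor(state1, action1))
    td_delta = (reward + gamma * maxqnew) - oldv
    return theta + alpha * td_delta * feature_extractor(state1, action1)
```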
|
github_jupyter
|
import numpy as np
import sys
if "../" not in sys.path:
sys.path.append("../")
from lib.envs.simple_rooms import SimpleRoomsEnv
from lib.simulation import Experiment
class Agent(object):
def __init__(self, actions):
self.actions = actions
self.num_actions = len(actions)
def act(self, state):
raise NotImplementedError
class QLearningFAAgent(Agent):
def __init__(self, actions, obs_size, epsilon=0.01, alpha=0.5, gamma=1):
super(QLearningFAAgent, self).__init__(actions)
## TODO 1
## Initialize thetas here
## In addition, initialize the value of epsilon, alpha and gamma
def featureExtractor(self, state, action):
feature = None
        actionindex = np.zeros(self.num_actions, dtype=int)
actionindex[action] = 1
feature = np.concatenate([actionindex[i] * state for i in self.actions])
return feature
def act(self, state):
## epsilon greedy policy
if np.random.random() < self.epsilon:
i = np.random.randint(0,len(self.actions))
else:
#q = [np.sum(self.theta.transpose() * self.featureExtractor(state, a)) for a in self.actions]
## TODO 2
q = 0 # replace 0 with the correct calculation here
if q.count(max(q)) > 1:
best = [i for i in range(len(self.actions)) if q[i] == max(q)]
i = np.random.choice(best)
else:
i = q.index(max(q))
action = self.actions[i]
return action
def learn(self, state1, action1, reward, state2, done):
"""
Q-learning with FA
theta <- theta + alpha * td_delta * f(s,a)
where
td_delta = reward + gamma * max(Q(s') - Q(s,a))
Q(s,a) = thetas * f(s,a)
max(Q(s')) = max( [ thetas * f(s'a) for a in all actions] )
"""
## TODO 3
## Implement the q-learning update here
maxqnew = 0 # replace 0 with the correct calculation
oldv = 0 # replace 0 with the correct calculation
td_target = reward + self.gamma * maxqnew
td_delta = td_target - oldv
self.theta += self.alpha * 0 # replace 0 with the correct calculation
interactive = True
%matplotlib nbagg
env = SimpleRoomsEnv()
agent = QLearningFAAgent(range(env.action_space.n),16)
experiment = Experiment(env, agent)
experiment.run_qlearning(10, interactive)
interactive = False
%matplotlib inline
env = SimpleRoomsEnv()
agent = QLearningFAAgent(range(env.action_space.n),16)
experiment = Experiment(env, agent)
experiment.run_qlearning(50, interactive)
| 0.215846 | 0.91114 |
# Basics of molecular graph
MolecularGraph.jl version: 0.10.0
This tutorial includes following fundamental operations of molecular graph.
- Concept of molecular object
- Iterate over graph elements
- Count atom nodes and bond edges
- Neighbors/adjacencies/incidences
- Edit molecular graph
- Subgraph view
```
using Pkg
Pkg.activate("..")
using MolecularGraph
# Demo molecule with atom indices
mol = smilestomol("Clc4cc2c(C(/c1ncccc1CC2)=C3/CCNCC3)cc4")
canvas = SvgCanvas()
draw2d!(canvas, mol)
drawatomindex!(canvas, mol)
molsvg = tosvg(canvas, 300, 300)
display("image/svg+xml", molsvg)
```
## Concept of molecule object
`GraphMol{Atom,Bond}` is a general purpose molecule type often used in MolecularGraph.jl. You can find the definition of it in `src/model/molgraph.jl`
```julia
struct GraphMol{A<:Atom,B<:Bond} <: OrderedGraph
neighbormap::Vector{Dict{Int,Int}}
edges::Vector{Tuple{Int,Int}}
nodeattrs::Vector{A}
edgeattrs::Vector{B}
cache::Dict{Symbol,Any}
attributes::Dict{Symbol,Any}
end
```
GraphMol is a simple graph model with molecule attributes. The graph is represented by the `neighbormap` field (a size N vector of adjacency dicts {edge => node}) and the `edges` field (a size E vector of tuples (node1, node2)), where N is the number of nodes and E is the number of edges. This model assumes node and edge indices are consecutive, so the indices of the `neighbormap` and `edges` vectors implicitly correspond to node and edge indices, respectively.
`nodeattrs` field is a size N vector of node attribute objects whose type is a subtype of `Atom`. `Atom` is typically immutable and holds atom property values such as the atom symbol, charge number, mass number, etc.
`edgeattrs` field is a size E vector of edge attribute objects whose type is a subtype of `Bond`. `Bond` is also typically immutable and holds bond property values such as the bond order number and stereochemistry flags.
`cache` holds caches of calculated descriptors, described later.
`attributes` holds attributes of the molecule itself. Typically, SDFile optional fields like `> <compound_name>` are stored in this field.
## Iterate over graph elements
Calling `GraphMol.nodeattrs` and `GraphMol.edgeattrs` directly is not recommended. Use `nodeattrs(mol)` and `edgeattrs(mol)` interfaces to iterate over elements.
Most graph-related functions are in the `Graph` submodule. You can write `using MolecularGraph.Graph` to call these functions conveniently.
```
using MolecularGraph.Graph
println("Atoms:")
for (i, atom) in enumerate(nodeattrs(mol))
print("($(i), $(atom.symbol)), ")
end
println()
println("Bonds:")
for (i, bond) in enumerate(edgeattrs(mol))
print("($(i), $(bond.order)), ")
end
```
## Count atom nodes and bond edges
`Graph.nodecount` and `Graph.edgecount` count the number of graph elements. Note that these do not include atoms not described in the graph (e.g. implicit hydrogens).
```
ncnt = nodecount(mol)
ecnt = edgecount(mol)
println("Nodes: $(ncnt)")
println("Edges: $(ecnt)")
```
## Neighbors/adjacencies/incidences
`neighbors(mol, atom1)` returns a dict of (`edge` => `atom2`) pairs, meaning `atom2` is connected to `atom1` through `edge`. This is in fact just an alias of `mol.neighbormap[atom1]`.
`adjacencies(mol, atom1)` returns a set of adjacent (= connected by an edge) nodes. `incidences(mol, atom2)` returns a set of incident (= connecting) edges. These methods generate new sets, therefore they are safe for destructive operations like `setdiff!`, but a bit more costly than `neighbors`.
```
neighbors(mol, 2)
adjacencies(mol, 2)
incidences(mol, 2)
```
## Edit molecular graph
In MolecularGraph.jl, methods that edit molecules manually are intentionally made less accessible. Such methods can change molecule objects unexpectedly and cause serious adverse effects on the consistency and reproducibility of your analysis workflow.
In many cases, following alternative methods would suffice.
- Methods in `src/preprocessing.jl`for general cheminformatics operations (see Preprocessing tutorial)
- `removehydrogens`/`addhydrogens` -> deal with implicit/explicit hydrogens
- `extractlargestcomponent` -> desaltation and dehydration
- Standardization of notation in protonation and resonance structure
- Extract substructures by `nodesubgraph` and `edgesubgraph` described below
### Graph topology
There are some methods that directly manipulate graph topology in the graph interface API (e.g. `addnode!`, `addedge!`, `unlinknodes` and `unlinkedges`), but they are not recommended because insertion and deletion of graph elements are computationally expensive in our vector-based molecular graph model.
### Attributes of graph elements
Note that types belonging to `Atom` and `Bond` are typically immutable. There are some methods to edit attributes, like `setcharge(atom)` and `setorder(bond)`, that do not actually change the objects themselves but return a new Atom or Bond with the attribute edited.
`setnodeattr!(mol, i, atom)` and `setedgeattr!(mol, i, edge)` are interfaces to replace the attribute object at the position i of `mol.nodeattr`/`mol.edgeattr` by the new atom/edge.
Accordingly, code to edit an atom attribute would be like as shown below.
```
mol2 = clone(mol) # I want to use the original mol later
newatom = setcharge(nodeattr(mol2, 8), 1)
setnodeattr!(mol2, 8, newatom)
molsvg = drawsvg(mol2, 300, 300)
display("image/svg+xml", molsvg)
```
## Subgraph view
`SubgraphView` consists of the original graph, a node set and an edge set, and behaves almost the same as a `GraphMol` object representing the substructure of the original graph induced by those node and edge sets.
`nodesubgraph(mol, nodeset)` returns a `SubgraphView` object that represents the node-induced substructure of `mol` induced from `nodeset`. `edgesubgraph(mol, edgeset)` returns an edge-induced `SubgraphView` similarly.
As SubgraphView refers to the molecule attributes and calculated descriptors of the original molecule, many descriptor calculation methods (and even structure drawing) can be applied to it without problems.
```
subg = nodesubgraph(mol, Set(7:12))
ncnt = nodecount(subg)
ecnt = edgecount(subg)
adj7 = adjacencies(subg, 7)
println("Nodes: $(ncnt)")
println("Edges: $(ecnt)")
println("Adjacecies of atom 7: $(adj7)")
molsvg = drawsvg(subg, 300, 300)
display("image/svg+xml", molsvg)
```
SubgraphView can be nested, but frequent subgraphing can affect performance.
If you want to mine the subgraph space deeply, it is recommended to instantiate it as a GraphMol by `graphmol(subg)`.
```
subgsubg = nodesubgraph(subg, Set(7:9))
molsvg = drawsvg(subgsubg, 300, 300)
display("image/svg+xml", molsvg)
graphmol(subgsubg)
```
|
github_jupyter
|
using Pkg
Pkg.activate("..")
using MolecularGraph
# Demo molecule with atom indices
mol = smilestomol("Clc4cc2c(C(/c1ncccc1CC2)=C3/CCNCC3)cc4")
canvas = SvgCanvas()
draw2d!(canvas, mol)
drawatomindex!(canvas, mol)
molsvg = tosvg(canvas, 300, 300)
display("image/svg+xml", molsvg)
struct GraphMol{A<:Atom,B<:Bond} <: OrderedGraph
neighbormap::Vector{Dict{Int,Int}}
edges::Vector{Tuple{Int,Int}}
nodeattrs::Vector{A}
edgeattrs::Vector{B}
cache::Dict{Symbol,Any}
attributes::Dict{Symbol,Any}
end
using MolecularGraph.Graph
println("Atoms:")
for (i, atom) in enumerate(nodeattrs(mol))
print("($(i), $(atom.symbol)), ")
end
println()
println("Bonds:")
for (i, bond) in enumerate(edgeattrs(mol))
print("($(i), $(bond.order)), ")
end
ncnt = nodecount(mol)
ecnt = edgecount(mol)
println("Nodes: $(ncnt)")
println("Edges: $(ecnt)")
neighbors(mol, 2)
adjacencies(mol, 2)
incidences(mol, 2)
mol2 = clone(mol) # I want to use the original mol later
newatom = setcharge(nodeattr(mol2, 8), 1)
setnodeattr!(mol2, 8, newatom)
molsvg = drawsvg(mol2, 300, 300)
display("image/svg+xml", molsvg)
subg = nodesubgraph(mol, Set(7:12))
ncnt = nodecount(subg)
ecnt = edgecount(subg)
adj7 = adjacencies(subg, 7)
println("Nodes: $(ncnt)")
println("Edges: $(ecnt)")
println("Adjacencies of atom 7: $(adj7)")
molsvg = drawsvg(subg, 300, 300)
display("image/svg+xml", molsvg)
subgsubg = nodesubgraph(subg, Set(7:9))
molsvg = drawsvg(subgsubg, 300, 300)
display("image/svg+xml", molsvg)
graphmol(subgsubg)
| 0.33372 | 0.985426 |
<a href="https://colab.research.google.com/github/derzhavin3016/CompMath/blob/master/Lab1/Lab1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Homework Lab No. 1 in Computational Mathematics
### Andrey Derzhavin, group B01-909
```
import numpy as np
from matplotlib import pyplot as plt
import sympy as sp
```
## Declaring some auxiliary constants
- the symbol x, used by sympy
- machine $\varepsilon$
- the array of $h$ values
```
# Some constants
x_s = sp.symbols('x')
eps = np.finfo(float).eps
def get_h(n):
return 2 / 2 ** (n)
n_arr = np.arange(1, 21)
h_arr = get_h(n_arr)
```
## Deriving the error formulas for the methods
### Method 1
$$
\frac{f(x + h) - f(x)}{h} \approx f'(x) + f''(x)\frac{h}{2} \Rightarrow
\Delta_m = f''(x)\frac{h}{2}
$$
Total error:
$$
\Delta = \boxed{\frac{2\varepsilon}{h} + f''(x)\frac{h}{2}}
$$
### Method 2
$$
\frac{f(x) - f(x - h)}{h} \approx f'(x) - f''(x)\frac{h}{2} \Rightarrow
\Delta_m = -f''(x)\frac{h}{2}
$$
Total error:
$$
\Delta = \boxed{\frac{2\varepsilon}{h} - f''(x)\frac{h}{2}}
$$
### Method 3
$$
\frac{f(x + h) - f(x-h)}{2h} \approx f'(x) + f'''(x)\frac{h^2}{6} \Rightarrow
\Delta_m = f'''(x)\frac{h^2}{6}
$$
Total error:
$$
\Delta = \boxed{\frac{\varepsilon}{h} + f'''(x)\frac{h^2}{6}}
$$
### Method 4
$$
\frac{4}{3} \frac{f(x + h) - f(x - h)}{2h}
- \frac{1}{3} \frac{f(x + 2h) - f(x - 2h)}{4h} \approx
\frac{4}{3} \left( f'(x) + f^{(5)}(x) \cdot h^4 / 120 \right) -
\frac{1}{3} \left( f'(x) + f^{(5)}(x) \cdot 16h^4 / 120 \right) \Rightarrow
$$$$
\Delta_{m} = f^{(5)}(x) \cdot \frac{h^4}{3\cdot 120} \left(4-16 \right) =
-f^{(5)}(x) \cdot \frac{h^4}{30}
$$
Total error:
$$
\Delta = \frac{4\varepsilon}{3h} + \frac{\varepsilon}{6h} - f^{(5)}(x) \cdot \frac{h^4}{30}
= \boxed{\frac{3}{2}\frac{\varepsilon}{h} -f^{(5)}(x) \cdot \frac{h^4}{30}}
$$
### Method 5
$$
\frac{3}{2} \frac{f(x + h) - f(x - h)}{2h}
- \frac{3}{5} \frac{f(x + 2h) - f(x - 2h)}{4h}+
\frac{1}{10} \frac{f(x + 3h) - f(x - 3h)}{6h} \approx
\frac{3}{2} \left( f'(x) + f^{(7)}(x) \cdot \frac{h^6}{5040} \right) -
\frac{3}{5} \left( f'(x) + f^{(7)}(x) \cdot 64\frac{h^6}{5040} \right) +
\frac{1}{10} \left( f'(x) + f^{(7)}(x) \cdot 729\frac{h^6}{5040} \right)
\Rightarrow
$$$$
\Delta_{m} = f^{(7)}(x) \cdot \frac{h^6}{10 \cdot 5040}
\left(15 - 384 +729\right) =
f^{(7)}(x) \cdot \frac{h^6}{140}
$$
Total error:
$$
\Delta = \frac{3\varepsilon}{2h} + \frac{3\varepsilon}{10h} +
\frac{\varepsilon}{30h}
+f^{(7)}(x) \cdot \frac{h^6}{140}
= \boxed{\frac{11}{6}\frac{\varepsilon}{h} + f^{(7)}(x) \cdot \frac{h^6}{140}}
$$
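As a side note (not part of the original assignment, just the standard observation), each of the total errors above has a minimum in $h$. For Method 1, setting the derivative with respect to $h$ to zero gives
$$
\frac{d}{dh}\left(\frac{2\varepsilon}{h} + f''(x)\frac{h}{2}\right) = -\frac{2\varepsilon}{h^2} + \frac{f''(x)}{2} = 0
\Rightarrow h_{opt} = 2\sqrt{\frac{\varepsilon}{f''(x)}},
$$
which is why the log-log error curves plotted below have a minimum: the truncation term dominates for large $h$, while the round-off term dominates for small $h$.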
## Implementing the derived error estimates as functions
- The functions are `sympy` functions; each error function is passed such a function, a point, and the step.
```
def err_1(f, x, h):
return sp.diff(f, x_s, 2).subs(x_s, x).doit() * h / 2 + 2 * eps / h
def err_2(f, x, h):
return 2 * eps / h - sp.diff(f, x_s, 2).subs(x_s, x).doit() * h / 2
def err_3(f, x, h):
return eps / h + sp.diff(f, x_s, 3).subs(x_s, x) * h * h / 6
def err_4(f, x, h):
delta_round = 4 / 3 * eps / h + 1 / 3 * eps / (2 * h)
meth_delta = - sp.diff(f, x_s, 5).subs(x_s, x) * h * h * h * h / 30
return delta_round + meth_delta
def err_5(f, x, h):
delta_round = 1.5 * eps / h + 0.6 * eps / (2 * h) + 0.1 * eps / (3 * h)
meth_delta = sp.diff(f, x_s, 7).subs(x_s, x) * h * h * h * h * h * h / 140
return delta_round + meth_delta
err_func_list = [err_1, err_2, err_3, err_4, err_5]
```
## Array of functions to be studied
```
funcs_list = [
sp.sin(x_s ** 2),
sp.cos(sp.sin(x_s)),
sp.exp(sp.sin(sp.cos(x_s))),
sp.log(x_s + 3),
(x_s + 3) ** 0.5
]
```
## Implementing a class that represents a method
- The class stores the error function received in its constructor and has a single method:
  1. `plot` builds the required plot for the function `f` at the point `x`, using the error function received in the constructor
```
class Method:
# func : func(x)
# err : err(f, x, h)
def __init__(self, err):
self.__err = err
def plot(self, f, x):
apply_x = lambda h: self.__err(f, x, h)
delta_arr = abs(apply_x(h_arr))
plt.loglog(h_arr, delta_arr, label = f'Method #{self.__err.__name__[-1]}')
```
## Implementing a helper function for more convenient plotting
This function takes a symbolic (`sympy`) function `func` and a point `x` (the point at which the derivative is evaluated), and then plots, in a single figure, the error versus the step size for each error function.
```
def run_method(func, x = np.pi / 4):
for err in err_func_list:
meth = Method(err)
meth.plot(func, x)
plt.legend()
plt.title(f"f(x) = {str(func)}")
plt.show()
```
## Running this function for each of the required functions to obtain the plots
```
run_method(funcs_list[0])
run_method(funcs_list[1])
run_method(funcs_list[2])
run_method(funcs_list[3])
run_method(funcs_list[4])
```
|
github_jupyter
|
import numpy as np
from matplotlib import pyplot as plt
import sympy as sp
# Some constants
x_s = sp.symbols('x')
eps = np.finfo(float).eps
def get_h(n):
return 2 / 2 ** (n)
n_arr = np.arange(1, 21)
h_arr = get_h(n_arr)
def err_1(f, x, h):
return sp.diff(f, x_s, 2).subs(x_s, x).doit() * h / 2 + 2 * eps / h
def err_2(f, x, h):
return 2 * eps / h - sp.diff(f, x_s, 2).subs(x_s, x).doit() * h / 2
def err_3(f, x, h):
return eps / h + sp.diff(f, x_s, 3).subs(x_s, x) * h * h / 6
def err_4(f, x, h):
delta_round = 4 / 3 * eps / h + 1 / 3 * eps / (2 * h)
meth_delta = - sp.diff(f, x_s, 5).subs(x_s, x) * h * h * h * h / 30
return delta_round + meth_delta
def err_5(f, x, h):
delta_round = 1.5 * eps / h + 0.6 * eps / (2 * h) + 0.1 * eps / (3 * h)
meth_delta = sp.diff(f, x_s, 7).subs(x_s, x) * h * h * h * h * h * h / 140
return delta_round + meth_delta
err_func_list = [err_1, err_2, err_3, err_4, err_5]
funcs_list = [
sp.sin(x_s ** 2),
sp.cos(sp.sin(x_s)),
sp.exp(sp.sin(sp.cos(x_s))),
sp.log(x_s + 3),
(x_s + 3) ** 0.5
]
class Method:
# func : func(x)
# err : err(f, x, h)
def __init__(self, err):
self.__err = err
def plot(self, f, x):
apply_x = lambda h: self.__err(f, x, h)
delta_arr = abs(apply_x(h_arr))
plt.loglog(h_arr, delta_arr, label = f'Method #{self.__err.__name__[-1]}')
def run_method(func, x = np.pi / 4):
for err in err_func_list:
meth = Method(err)
meth.plot(func, x)
plt.legend()
plt.title(f"f(x) = {str(func)}")
plt.show()
run_method(funcs_list[0])
run_method(funcs_list[1])
run_method(funcs_list[2])
run_method(funcs_list[3])
run_method(funcs_list[4])
| 0.453504 | 0.99128 |
# Train your first neural network: basic classification
This guide trains a neural network model to **classify images of clothing, like sneakers and shirts**.
This guide uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow.
```
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
```
## Import the Fashion MNIST dataset
This guide uses the Fashion MNIST dataset which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 by 28 pixels)
We will use 60,000 images to train the network and 10,000 images to evaluate how accurately the network learned to classify images.
https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
Put the four compressed files in the `~/.keras/datasets/fashion-mnist` directory after manual download.
```
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
```
Loading the dataset returns four NumPy arrays:
* The `train_images` and `train_labels` arrays are the *training set*—the data the model uses to learn.
* The model is tested against the *test set*, the `test_images`, and `test_labels` arrays.
**The images are 28x28 NumPy arrays**, with pixel values ranging between 0 and 255. The *labels* are an array of integers, ranging from 0 to 9. These correspond to the *class* of clothing the image represents:
<table>
<tr>
<th>Label</th>
<th>Class</th>
</tr>
<tr>
<td>0</td>
<td>T-shirt/top</td>
</tr>
<tr>
<td>1</td>
<td>Trouser</td>
</tr>
<tr>
<td>2</td>
<td>Pullover</td>
</tr>
<tr>
<td>3</td>
<td>Dress</td>
</tr>
<tr>
<td>4</td>
<td>Coat</td>
</tr>
<tr>
<td>5</td>
<td>Sandal</td>
</tr>
<tr>
<td>6</td>
<td>Shirt</td>
</tr>
<tr>
<td>7</td>
<td>Sneaker</td>
</tr>
<tr>
<td>8</td>
<td>Bag</td>
</tr>
<tr>
<td>9</td>
<td>Ankle boot</td>
</tr>
</table>
Each image is mapped to a single label. Since the *class names* are not included with the dataset, store them here to use later when plotting the images:
```
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
```
## Explore the data
Let's explore the format of the dataset before training the model. The following shows there are 60,000 images in the training set, with each image represented as 28 x 28 pixels:
```
train_images.shape
```
Likewise, there are 60,000 labels in the training set:
```
len(train_labels)
```
Each label is an integer between 0 and 9:
```
train_labels
```
There are 10,000 images in the test set. Again, each image is represented as 28 x 28 pixels:
```
test_images.shape
```
And the test set contains 10,000 image labels:
```
len(test_labels)
```
## Preprocess the data
The data must be preprocessed before training the network. If you inspect the first image in the training set, you will see that the pixel values fall in the range of 0 to 255:
```
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
```
We **scale these values to a range of 0 to 1** before feeding them to the neural network model. For this, cast the datatype of the image components **from an integer to a float, and divide by 255**. Here's how the images are preprocessed:
It's important that the *training set* and the *testing set* are preprocessed in the same way:
```
train_images = train_images / 255.0
test_images = test_images / 255.0
```
Display the first 25 images from the *training set* and display the class name below each image. Verify that the data is in the correct format and we're ready to build and train the network.
```
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
```
## Build the model
Building the neural network requires configuring the layers of the model, then compiling the model.
### Setup the layers
The basic building block of a neural network is the *layer*. Layers extract representations from the data fed into them. And, hopefully, these representations are more meaningful for the problem at hand.
Most of deep learning consists of chaining together simple layers. Most layers, like `tf.keras.layers.Dense`, have parameters that are learned during training.
```
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
```
The first layer in this network, `tf.keras.layers.Flatten`, transforms the format of the images from a 2d-array (of 28 by 28 pixels), **to a 1d-array** of 28 * 28 = 784 pixels. Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn; it only reformats the data.
After the pixels are flattened, the network consists of a sequence of two `tf.keras.layers.Dense` layers. These are densely-connected, or fully-connected, neural layers. The first `Dense` layer has 128 nodes (or neurons). The second (and last) layer is a 10-node **softmax** layer—**this returns an array of 10 probability scores that sum to 1**. Each node contains a score that indicates the probability that the current image belongs to one of the 10 classes.
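To make the softmax behaviour concrete, here is a small standalone sketch (plain NumPy, not part of the original guide) showing how softmax turns arbitrary scores into probabilities that sum to 1:
```
import numpy as np

def softmax(scores):
    # Exponentiate (shifted by the max for numerical stability), then normalize
    exps = np.exp(scores - np.max(scores))
    return exps / exps.sum()

probs = softmax(np.array([2.0, 1.0, 0.1]))
print(probs, probs.sum())  # the three probabilities sum to 1.0
```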
### Compile the model
Before the model is ready for training, it needs a few more settings. These are added during the model's *compile* step:
* **Loss function** —This measures how accurate the model is during training. We want to minimize this function to "steer" the model in the right direction.
* **Optimizer** —This is how the model is updated based on the data it sees and its loss function.
* **Metrics** —Used to monitor the training and testing steps. The following example uses *accuracy*, the fraction of the images that are correctly classified.
```
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
```
## Train the model
Training the neural network model requires the following steps:
1. Feed the training data to the model—in this example, the `train_images` and `train_labels` arrays.
2. The model learns to associate images and labels.
3. We ask the model to make predictions about a test set—in this example, the `test_images` array. We verify that the predictions match the labels from the `test_labels` array.
To start training, call the `model.fit` method—the model is "fit" to the training data:
```
model.fit(train_images, train_labels, epochs=5)
```
As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.88 (or 88%) on the training data.
## Evaluate accuracy
Next, compare how the model performs on the test dataset:
```
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
```
It turns out the accuracy on the test dataset is a little less than the accuracy on the training dataset. This gap between training accuracy and test accuracy is an example of *overfitting*. Overfitting is when a machine learning model performs worse on new data than on its training data.
## Make predictions
With the model trained, we can use it to make predictions about some images.
```
predictions = model.predict(test_images)
```
Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction:
```
predictions[0]
```
**A prediction is an array** of 10 numbers. These describe the "confidence" of the model that the image corresponds to each of the 10 different articles of clothing. We can see which label has the highest confidence value:
```
np.argmax(predictions[0])
```
So the model is most confident that this image is an ankle boot, or `class_names[9]`. And we can check the test label to see this is correct:
```
test_labels[0]
```
We can graph this to look at the full set of 10 class predictions.
```
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
```
Let's look at the 0th image, predictions, and prediction array.
```
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
```
Let's plot several images with their predictions. Correct prediction labels are blue and incorrect prediction labels are red. The number gives the percent (out of 100) for the predicted label. Note that it can be wrong even when very confident.
```
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
```
Finally, use the trained model to make a prediction about a single image.
```
# Grab an image from the test dataset
img = test_images[0]
print(img.shape)
```
`tf.keras` models are optimized to make predictions on a *batch*, or collection, of examples at once. So even though we're using a single image, we need to add it to a list:
```
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
```
Now predict the image:
```
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
```
`model.predict` returns a list of lists, one for each image in the batch of data. Grab the predictions for our (only) image in the batch:
```
np.argmax(predictions_single[0])
```
And, as before, the model predicts a label of 9.
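As a small follow-up (a sketch reusing the `predictions_single` and `class_names` objects defined above), the predicted index can be mapped back to its human-readable class name:
```
# Convert the predicted class index into its class name
predicted_index = np.argmax(predictions_single[0])
print(class_names[predicted_index])  # expected to print "Ankle boot" for this image
```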
|
github_jupyter
|
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images.shape
len(train_labels)
train_labels
test_images.shape
len(test_labels)
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
predictions = model.predict(test_images)
predictions[0]
np.argmax(predictions[0])
test_labels[0]
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
# Grab an image from the test dataset
img = test_images[0]
print(img.shape)
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
np.argmax(predictions_single[0])
| 0.85115 | 0.994724 |
```
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
import seaborn as sns
train=pd.read_csv("train.csv")
test=pd.read_csv("test.csv")
full_data = [train, test]
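# Feature engineering: name length, a has-cabin flag, family size and an is-alone flag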
train['Name_length'] = train['Name'].apply(len)
test['Name_length'] = test['Name'].apply(len)
train['Has_Cabin'] = train["Cabin"].apply(lambda x: 0 if type(x) == float else 1)
test['Has_Cabin'] = test["Cabin"].apply(lambda x: 0 if type(x) == float else 1)
for dataset in full_data:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
for dataset in full_data:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
for dataset in full_data:
dataset['Embarked'] = dataset['Embarked'].fillna('S')
for dataset in full_data:
dataset['Age'] = dataset['Age'].fillna(dataset['Age'].mean())
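# Extract the passenger's title (e.g. Mr, Mrs, Miss) from the Name column with a regex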
def get_title(name):
title_search = re.search(' ([A-Za-z]+)\.', name)
if title_search:
return title_search.group(1)
return ""
for dataset in full_data:
dataset['Title'] = dataset['Name'].apply(get_title)
for dataset in full_data:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
for dataset in full_data:
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
dataset.head()
dataset=dataset.drop(columns=['Name','Cabin','Sex','Embarked','Ticket','Fare','Age'])
colormap = plt.cm.RdBu
plt.figure(figsize=(14,12))
plt.title('Pearson Correlation of Features', y=1.05, size=15)
sns.heatmap(train.corr(),linewidths=0.1,vmax=1.0,
square=True, cmap=colormap, linecolor='white', annot=True)
train=train.drop(columns=['Name','Cabin','Sex','Embarked','Ticket','Fare','Age'],axis=1)
train.head()
train.isnull().sum()
test=test.drop(columns=['Name','Cabin','Sex','Embarked','Ticket','Fare','Age'])
test.head()
test.isnull().sum()
features=['Pclass', 'SibSp', 'Parch', 'Name_length',
'Has_Cabin', 'FamilySize', 'IsAlone', 'Title']
X=train[features]
y=train['Survived']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
X_train.head()
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import classification_report, confusion_matrix
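# Fit a random forest on the engineered features and evaluate it on the held-out split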
model = RandomForestClassifier(n_estimators=1000, min_samples_leaf=10, random_state=1)
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print("Random Forest Accuracy:", metrics.accuracy_score(y_test, predictions))
print("Mean Absolute Error:", mean_absolute_error(predictions, y_test))
print("Classification Report:\n", classification_report(y_test, predictions))
print("Confusion Matrix:")
df = pd.DataFrame(
confusion_matrix(y_test, predictions),
index = [['actual', 'actual'], ['0','1']],
columns = [['predicted', 'predicted'], ['0','1']])
print(df)
pred_Test_Set = model.predict(test[features])
pred_Test_Set.size
test['Survived'] = pred_Test_Set
togo=['PassengerId','Survived']
testfinal=test[togo]
testfinal.to_csv('TitanicFinal.csv',index=False)
```
END
|
github_jupyter
|
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
import seaborn as sns
train=pd.read_csv("train.csv")
test=pd.read_csv("test.csv")
full_data = [train, test]
train['Name_length'] = train['Name'].apply(len)
test['Name_length'] = test['Name'].apply(len)
train['Has_Cabin'] = train["Cabin"].apply(lambda x: 0 if type(x) == float else 1)
test['Has_Cabin'] = test["Cabin"].apply(lambda x: 0 if type(x) == float else 1)
for dataset in full_data:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
for dataset in full_data:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
for dataset in full_data:
dataset['Embarked'] = dataset['Embarked'].fillna('S')
for dataset in full_data:
dataset['Age'] = dataset['Age'].fillna(dataset['Age'].mean())
def get_title(name):
title_search = re.search(' ([A-Za-z]+)\.', name)
if title_search:
return title_search.group(1)
return ""
for dataset in full_data:
dataset['Title'] = dataset['Name'].apply(get_title)
for dataset in full_data:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
for dataset in full_data:
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
dataset.head()
dataset=dataset.drop(columns=['Name','Cabin','Sex','Embarked','Ticket','Fare','Age'])
colormap = plt.cm.RdBu
plt.figure(figsize=(14,12))
plt.title('Pearson Correlation of Features', y=1.05, size=15)
sns.heatmap(train.corr(),linewidths=0.1,vmax=1.0,
square=True, cmap=colormap, linecolor='white', annot=True)
train=train.drop(columns=['Name','Cabin','Sex','Embarked','Ticket','Fare','Age'],axis=1)
train.head()
train.isnull().sum()
test=test.drop(columns=['Name','Cabin','Sex','Embarked','Ticket','Fare','Age'])
test.head()
test.isnull().sum()
features=['Pclass', 'SibSp', 'Parch', 'Name_length',
'Has_Cabin', 'FamilySize', 'IsAlone', 'Title']
X=train[features]
y=train['Survived']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
X_train.head()
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import classification_report, confusion_matrix
model = RandomForestClassifier(n_estimators=1000, min_samples_leaf=10, random_state=1)
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print("Random Forest Accuracy:", metrics.accuracy_score(y_test, predictions))
print("Mean Absolute Error:", mean_absolute_error(predictions, y_test))
print("Classification Report:\n", classification_report(y_test, predictions))
print("Confusion Matrix:")
df = pd.DataFrame(
confusion_matrix(y_test, predictions),
index = [['actual', 'actual'], ['0','1']],
columns = [['predicted', 'predicted'], ['0','1']])
print(df)
pred_Test_Set = model.predict(test[features])
pred_Test_Set.size
test['Survived'] = pred_Test_Set
togo=['PassengerId','Survived']
testfinal=test[togo]
testfinal.to_csv('TitanicFinal.csv',index=False)
| 0.399109 | 0.46642 |
# Lib
```
import os
import pandas as pd
import numpy as np
import datetime as dt
from lifelines import KaplanMeierFitter
from lifelines.plotting import add_at_risk_counts
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
```
# Load data
```
fname_schema = "SCHEMA_21JAN2021_31AUG2021.parquet"
obito_pares = "PAREADOS_COM_INTERVALOS_OBITO_6.parquet"
hospital_pares = "PAREADOS_COM_INTERVALOS_HOSPITAL_6.parquet"
#surv_obito = "SURVIVAL_CORONAVAC_D1D2_OBITO_1.parquet"
#surv_hospital = "SURVIVAL_CORONAVAC_D1D2_HOSPITAL_1.parquet"
vaccine = "CORONAVAC"
data_folder = os.path.join("..", "output", "data")
pareado_folder = os.path.join("..", "output", "PAREAMENTO", vaccine)
fschema = pd.read_parquet(os.path.join(data_folder, fname_schema))
obito_df = pd.read_parquet(os.path.join(pareado_folder, obito_pares))
obito_df
pares = pd.read_parquet(os.path.join(pareado_folder, "EVENTOS_PAREADOS_1.parquet"))
condition = (pares["TIPO"]=="CASO") & (pd.notna(pares["DATA HOSPITALIZACAO"]) & (pares["PAREADO"]==True))
pares[condition]
condition = (pares["TIPO"]=="CONTROLE") & (pd.notna(pares["DATA HOSPITALIZACAO"]) & (pares["PAREADO"]==True))
pares[condition]
1335/1868
fsurv = obito.fsurvival
caso = fsurv[fsurv["TIPO"]=="CASO"]
controle = fsurv[fsurv["TIPO"]=="CONTROLE"]
caso.sample(n=10, replace=True)
```
## Lifelines tests
```
%run ../ve_estimate.py
fschema.columns
1-(241/342)
fschema_astra = fschema[fschema["VACINA APLICADA"]=="ASTRAZENECA"]
fschema_cor = fschema[fschema["VACINA APLICADA"]=="CORONAVAC"]
fschema_pfizer = fschema[fschema["VACINA APLICADA"]=="PFIZER"]
fschema_astra["DATA OBITO"].notnull().sum()
fschema_cor["DATA OBITO"].notnull().sum()
fschema_pfizer["DATA OBITO"].notnull().sum()
vacinad
fschema_astra["IDADE"].hist()
sub = fschema[fschema["CPF"].isin(hospital.sub_data["D2"]["CASO"]["CPF"])]
sub[pd.notna(sub["DATA HOSPITALIZACAO"])][["SITUACAO VACINEJA", "STATUS VACINACAO DURANTE COORTE"]]
212/433
df5 = pd.read_parquet(os.path.join(pareado_folder, "SURVIVAL", "SURVIVAL_CORONAVAC_D1D2_HOSPITAL_3.parquet"))
df6 = pd.read_parquet(os.path.join(pareado_folder, "SURVIVAL", "SURVIVAL_CORONAVAC_D1D2_HOSPITAL_6.parquet"))
df6["E - D2 HOSPITAL"].sum()
df5["E - D2 HOSPITAL"].sum()
obt = fschema[pd.notna(fschema["DATA OBITO"])]
obt["MESANO"] = obt["DATA OBITO"].apply(lambda x: f'{x.year}{x.month}')
obt["MESANO"].value_counts()
fschema[pd.notna(fschema["DATA UTI"])]["VACINA APLICADA"].value_counts()
fschema["GRUPO PRIORITARIO"].value_counts()
vacinados = pd.read_parquet(os.path.join("..", "..", "..", "data", "PARQUET_TRANSFORMED", "VACINADOS.parquet"))
lst = ["PROFISSIONAL DE SAUDE", "TRABALHADOR DA SAUDE"]
vac_saude = vacinados[vacinados["grupo prioritario(VACINADOS)"].isin(lst)]
fsaude = fschema[fschema["CPF"].isin(vac_saude["cpf(VACINADOS)"])]
fsaude[pd.notna(fsaude["DATA HOSPITALIZACAO"])]["DATA HOSPITALIZACAO"]
vacinados.columns
fschema["UTI"].value_counts()
fschema_uti = fschema[pd.notna(fschema["UTI"])]
s = fschema_uti[fschema_uti["UTI"].str.contains("SIM")][["DT_ENTUTI", "DATA D1", "DATA D2"]]
s.apply(lambda x: "NAO VACINADO" if x["DT_ENTUTI"]<x["DATA D1"] and x["DT_ENTUTI"]<x["DATA D2"] else ("D1" if x["DT_ENTUTI"]>x["DATA D1"] and x["DT_ENTUTI"]<x["DATA D2"] else "D2"), axis=1)
s.info()
s["ENTUTI"] = s["DT_ENTUTI"].apply(lambda x: x.split(";"))
s["ENTUTI"] = s["ENTUTI"].apply(lambda x: [pd.to_datetime(xx) for xx in x])
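# Helper: pick the first non-missing date from each list of parsed timestamps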
def f(x):
for xx in x:
if pd.notna(xx):
return xx
s["ENTUTI"] = s["ENTUTI"].apply(f)
s["ENTUTI"]
s["STATUS"] = s.apply(lambda x: "NAO VACINADO" if x["ENTUTI"]<x["DATA D1"] and x["ENTUTI"]<x["DATA D2"] else ("D1" if x["ENTUTI"]>x["DATA D1"] and x["ENTUTI"]<x["DATA D2"] else ("D2" if x["ENTUTI"]>x["DATA D1"] and x["ENTUTI"]>x["DATA D2"] else "D1")), axis=1)
s["STATUS"].value_counts()
fschema["DT_ENTUTI"] = fschema["DT_ENTUTI"].apply(lambda x: [pd.to_datetime(xx) for xx in x.split(";")] if pd.notna(x) else np.nan)
fschema[pd.notna(fschema["DT_ENTUTI"])]["DT_ENTUTI"]
fschema["DT_ENTUTI"] = fschema["DT_ENTUTI"].apply(lambda x: x if not np.all(pd.isna(x)) else np.nan)
def new_hospitalization_date(x, cohort):
    '''
    Return the earliest hospitalization date in x that falls inside the cohort window,
    or NaN if there is none.
    '''
if not np.any(pd.notna(x)):
return np.nan
x = np.sort([xx for xx in x if pd.notna(xx)])
condition = (x>=cohort[0]) & (x<=cohort[1])
if x[condition].shape[0]>0:
return x[condition][0]
else:
return np.nan
cohort = (dt.datetime(2021, 1, 21), dt.datetime(2021, 8, 31))
fschema["DATA UTI"] = fschema["DT_ENTUTI"].apply(lambda x: new_hospitalization_date(x, cohort))
fschema[pd.notna(fschema["DATA UTI"])]["DATA UTI"]
fschema["BAIRRO"].value_counts()
```
|
github_jupyter
|
import os
import pandas as pd
import numpy as np
import datetime as dt
from lifelines import KaplanMeierFitter
from lifelines.plotting import add_at_risk_counts
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
fname_schema = "SCHEMA_21JAN2021_31AUG2021.parquet"
obito_pares = "PAREADOS_COM_INTERVALOS_OBITO_6.parquet"
hospital_pares = "PAREADOS_COM_INTERVALOS_HOSPITAL_6.parquet"
#surv_obito = "SURVIVAL_CORONAVAC_D1D2_OBITO_1.parquet"
#surv_hospital = "SURVIVAL_CORONAVAC_D1D2_HOSPITAL_1.parquet"
vaccine = "CORONAVAC"
data_folder = os.path.join("..", "output", "data")
pareado_folder = os.path.join("..", "output", "PAREAMENTO", vaccine)
fschema = pd.read_parquet(os.path.join(data_folder, fname_schema))
obito_df = pd.read_parquet(os.path.join(pareado_folder, obito_pares))
obito_df
pares = pd.read_parquet(os.path.join(pareado_folder, "EVENTOS_PAREADOS_1.parquet"))
condition = (pares["TIPO"]=="CASO") & (pd.notna(pares["DATA HOSPITALIZACAO"]) & (pares["PAREADO"]==True))
pares[condition]
condition = (pares["TIPO"]=="CONTROLE") & (pd.notna(pares["DATA HOSPITALIZACAO"]) & (pares["PAREADO"]==True))
pares[condition]
1335/1868
fsurv = obito.fsurvival
caso = fsurv[fsurv["TIPO"]=="CASO"]
controle = fsurv[fsurv["TIPO"]=="CONTROLE"]
caso.sample(n=10, replace=True)
%run ../ve_estimate.py
fschema.columns
1-(241/342)
fschema_astra = fschema[fschema["VACINA APLICADA"]=="ASTRAZENECA"]
fschema_cor = fschema[fschema["VACINA APLICADA"]=="CORONAVAC"]
fschema_pfizer = fschema[fschema["VACINA APLICADA"]=="PFIZER"]
fschema_astra["DATA OBITO"].notnull().sum()
fschema_cor["DATA OBITO"].notnull().sum()
fschema_pfizer["DATA OBITO"].notnull().sum()
vacinad
fschema_astra["IDADE"].hist()
sub = fschema[fschema["CPF"].isin(hospital.sub_data["D2"]["CASO"]["CPF"])]
sub[pd.notna(sub["DATA HOSPITALIZACAO"])][["SITUACAO VACINEJA", "STATUS VACINACAO DURANTE COORTE"]]
212/433
df5 = pd.read_parquet(os.path.join(pareado_folder, "SURVIVAL", "SURVIVAL_CORONAVAC_D1D2_HOSPITAL_3.parquet"))
df6 = pd.read_parquet(os.path.join(pareado_folder, "SURVIVAL", "SURVIVAL_CORONAVAC_D1D2_HOSPITAL_6.parquet"))
df6["E - D2 HOSPITAL"].sum()
df5["E - D2 HOSPITAL"].sum()
obt = fschema[pd.notna(fschema["DATA OBITO"])]
obt["MESANO"] = obt["DATA OBITO"].apply(lambda x: f'{x.year}{x.month}')
obt["MESANO"].value_counts()
fschema[pd.notna(fschema["DATA UTI"])]["VACINA APLICADA"].value_counts()
fschema["GRUPO PRIORITARIO"].value_counts()
vacinados = pd.read_parquet(os.path.join("..", "..", "..", "data", "PARQUET_TRANSFORMED", "VACINADOS.parquet"))
lst = ["PROFISSIONAL DE SAUDE", "TRABALHADOR DA SAUDE"]
vac_saude = vacinados[vacinados["grupo prioritario(VACINADOS)"].isin(lst)]
fsaude = fschema[fschema["CPF"].isin(vac_saude["cpf(VACINADOS)"])]
fsaude[pd.notna(fsaude["DATA HOSPITALIZACAO"])]["DATA HOSPITALIZACAO"]
vacinados.columns
fschema["UTI"].value_counts()
fschema_uti = fschema[pd.notna(fschema["UTI"])]
s = fschema_uti[fschema_uti["UTI"].str.contains("SIM")][["DT_ENTUTI", "DATA D1", "DATA D2"]]
s.apply(lambda x: "NAO VACINADO" if x["DT_ENTUTI"]<x["DATA D1"] and x["DT_ENTUTI"]<x["DATA D2"] else ("D1" if x["DT_ENTUTI"]>x["DATA D1"] and x["DT_ENTUTI"]<x["DATA D2"] else "D2"), axis=1)
s.info()
s["ENTUTI"] = s["DT_ENTUTI"].apply(lambda x: x.split(";"))
s["ENTUTI"] = s["ENTUTI"].apply(lambda x: [pd.to_datetime(xx) for xx in x])
def f(x):
for xx in x:
if pd.notna(xx):
return xx
s["ENTUTI"] = s["ENTUTI"].apply(f)
s["ENTUTI"]
s["STATUS"] = s.apply(lambda x: "NAO VACINADO" if x["ENTUTI"]<x["DATA D1"] and x["ENTUTI"]<x["DATA D2"] else ("D1" if x["ENTUTI"]>x["DATA D1"] and x["ENTUTI"]<x["DATA D2"] else ("D2" if x["ENTUTI"]>x["DATA D1"] and x["ENTUTI"]>x["DATA D2"] else "D1")), axis=1)
s["STATUS"].value_counts()
fschema["DT_ENTUTI"] = fschema["DT_ENTUTI"].apply(lambda x: [pd.to_datetime(xx) for xx in x.split(";")] if pd.notna(x) else np.nan)
fschema[pd.notna(fschema["DT_ENTUTI"])]["DT_ENTUTI"]
fschema["DT_ENTUTI"] = fschema["DT_ENTUTI"].apply(lambda x: x if not np.all(pd.isna(x)) else np.nan)
def new_hospitalization_date(x, cohort):
    '''
    Return the earliest hospitalization date in x that falls inside the cohort window,
    or NaN if there is none.
    '''
if not np.any(pd.notna(x)):
return np.nan
x = np.sort([xx for xx in x if pd.notna(xx)])
condition = (x>=cohort[0]) & (x<=cohort[1])
if x[condition].shape[0]>0:
return x[condition][0]
else:
return np.nan
cohort = (dt.datetime(2021, 1, 21), dt.datetime(2021, 8, 31))
fschema["DATA UTI"] = fschema["DT_ENTUTI"].apply(lambda x: new_hospitalization_date(x, cohort))
fschema[pd.notna(fschema["DATA UTI"])]["DATA UTI"]
fschema["BAIRRO"].value_counts()
| 0.104557 | 0.52074 |
# Scraping YouTube Data with the YouTube API
## Setup
### Library
```
import configparser
from googleapiclient.discovery import build
import pandas as pd
```
### Important Variables
```
config_file = 'youtube.ini'
config = configparser.ConfigParser()
config.read(config_file)
API_KEY = config['youtube']['api_key']
basic_file = 'hololive.xlsx'
df_basic = pd.read_excel(basic_file)
channels = df_basic['channel_id']
```
### Request to the YouTube API
```
service_name = 'youtube'
service_version = 'v3'
youtube = build(serviceName=service_name, version=service_version, developerKey=API_KEY)
```
### Request Channels Info
```
request = youtube.channels().list(
part = "id,snippet,contentDetails,statistics",
id = ','.join(channels)
)
response = request.execute()
```
### Select Data from Response
```
all_data = []
for respon in response['items']:
data = dict(
channel_id = respon['id'],
channel_name = respon['snippet']['title'],
subscribers = respon['statistics']['subscriberCount'],
views = respon['statistics']['viewCount'],
total_videos = respon['statistics']['videoCount'],
playlist_id =respon['contentDetails']['relatedPlaylists']['uploads']
)
all_data.append(data)
df_holo = pd.DataFrame(all_data)
df_holo.head()
```
### Merge Basic Info Data with Scraped Data from YouTube
```
df = pd.merge(df_basic, df_holo, on="channel_id")
# Change the data type to numeric
df['subscribers'] = pd.to_numeric(df['subscribers'])
df['views'] = pd.to_numeric(df['views'])
df['total_videos'] = pd.to_numeric(df['total_videos'])
df.head()
```
### Request Video ID
```
def get_video_ids(youtube, playlist_id):
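    # Fetch the first page of playlist items (the API returns at most 50 per call)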
request = youtube.playlistItems().list(
part='contentDetails',
playlistId = playlist_id,
maxResults = 50)
response = request.execute()
video_ids = [item['contentDetails']['videoId'] for item in response['items']]
next_page_token = response.get('nextPageToken')
more_pages = True
while more_pages:
if next_page_token is None:
more_pages = False
else:
request = youtube.playlistItems().list(
part='contentDetails',
playlistId = playlist_id,
maxResults = 50,
pageToken = next_page_token)
response = request.execute()
for i in range(len(response['items'])):
video_ids.append(response['items'][i]['contentDetails']['videoId'])
next_page_token = response.get('nextPageToken')
return video_ids
vid_ids = [get_video_ids(youtube, playlist) for playlist in df['playlist_id']]
```
### Request Video Info
```
def get_video_details(youtube, playlistid, video_ids, list_stats = []):
all_video_stats = list_stats
for i in range(0, len(video_ids), 50):
request = youtube.videos().list(
part='id,contentDetails,snippet,statistics',
id=','.join(video_ids[i:i+50]))
response = request.execute()
for video in response['items']:
video_stats = dict(channel_id = playlistid,
video_id = video['id'],
title = video['snippet']['title'],
published_date = video['snippet']['publishedAt'],
duration = video['contentDetails']['duration'],
views = video['statistics']['viewCount'],
comments = video['statistics'].get('commentCount'),
likes = video['statistics'].get('likeCount'),
dislikes = video['statistics'].get('dislikeCount'))
all_video_stats.append(video_stats)
return all_video_stats
data_list =[]
for i in range(len(vid_ids)):
data_list = get_video_details(youtube, df.iloc[i]['channel_id'], vid_ids[i], data_list)
df_videos = pd.DataFrame(data_list)
df_videos
df_videos.isna().sum()
df_videos.to_csv('hololive_videos.csv', index=False)
df_videos2 = pd.read_csv('hololive_videos.csv')
df_videos2.dtypes
df_videos2['published_date'] = pd.to_datetime(df_videos2['published_date'])
df_videos2.to_csv('hololive_videos.csv', index=False)
```
### Most Watched Videos
```
df_videos2.loc[df_videos2['views'] == df_videos2['views'].max()]
```
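A possible variation (a sketch using the same `df_videos2` frame, not part of the original notebook) lists the top videos directly with `nlargest`:
```
# Show the ten most-viewed videos with their titles and view counts
top10 = df_videos2.nlargest(10, 'views')[['channel_id', 'title', 'views']]
print(top10)
```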
|
github_jupyter
|
import configparser
from googleapiclient.discovery import build
import pandas as pd
config_file = 'youtube.ini'
config = configparser.ConfigParser()
config.read(config_file)
API_KEY = config['youtube']['api_key']
basic_file = 'hololive.xlsx'
df_basic = pd.read_excel(basic_file)
channels = df_basic['channel_id']
service_name = 'youtube'
service_version = 'v3'
youtube = build(serviceName=service_name, version=service_version, developerKey=API_KEY)
request = youtube.channels().list(
part = "id,snippet,contentDetails,statistics",
id = ','.join(channels)
)
response = request.execute()
all_data = []
for respon in response['items']:
data = dict(
channel_id = respon['id'],
channel_name = respon['snippet']['title'],
subscribers = respon['statistics']['subscriberCount'],
views = respon['statistics']['viewCount'],
total_videos = respon['statistics']['videoCount'],
playlist_id =respon['contentDetails']['relatedPlaylists']['uploads']
)
all_data.append(data)
df_holo = pd.DataFrame(all_data)
df_holo.head()
df = pd.merge(df_basic, df_holo, on="channel_id")
# Change the data type to numeric
df['subscribers'] = pd.to_numeric(df['subscribers'])
df['views'] = pd.to_numeric(df['views'])
df['total_videos'] = pd.to_numeric(df['total_videos'])
df.head()
def get_video_ids(youtube, playlist_id):
request = youtube.playlistItems().list(
part='contentDetails',
playlistId = playlist_id,
maxResults = 50)
response = request.execute()
video_ids = [item['contentDetails']['videoId'] for item in response['items']]
next_page_token = response.get('nextPageToken')
more_pages = True
while more_pages:
if next_page_token is None:
more_pages = False
else:
request = youtube.playlistItems().list(
part='contentDetails',
playlistId = playlist_id,
maxResults = 50,
pageToken = next_page_token)
response = request.execute()
for i in range(len(response['items'])):
video_ids.append(response['items'][i]['contentDetails']['videoId'])
next_page_token = response.get('nextPageToken')
return video_ids
vid_ids = [get_video_ids(youtube, playlist) for playlist in df['playlist_id']]
def get_video_details(youtube, playlistid, video_ids, list_stats = []):
all_video_stats = list_stats
for i in range(0, len(video_ids), 50):
request = youtube.videos().list(
part='id,contentDetails,snippet,statistics',
id=','.join(video_ids[i:i+50]))
response = request.execute()
for video in response['items']:
video_stats = dict(channel_id = playlistid,
video_id = video['id'],
title = video['snippet']['title'],
published_date = video['snippet']['publishedAt'],
duration = video['contentDetails']['duration'],
views = video['statistics']['viewCount'],
comments = video['statistics'].get('commentCount'),
likes = video['statistics'].get('likeCount'),
dislikes = video['statistics'].get('dislikeCount'))
all_video_stats.append(video_stats)
return all_video_stats
data_list =[]
for i in range(len(vid_ids)):
data_list = get_video_details(youtube, df.iloc[i]['channel_id'], vid_ids[i], data_list)
df_videos = pd.DataFrame(data_list)
df_videos
df_videos.isna().sum()
df_videos.to_csv('hololive_videos.csv', index=False)
df_videos2 = pd.read_csv('hololive_videos.csv')
df_videos2.dtypes
df_videos2['published_date'] = pd.to_datetime(df_videos2['published_date'])
df_videos2.to_csv('hololive_videos.csv', index=False)
df_videos2.loc[df_videos2['views'] == df_videos2['views'].max()]
| 0.183009 | 0.506897 |
# Lecture 5: Exercise Solutions
## Exercise 2: Sets
Write a function that finds the overlap of two sets and prints them.
Initialize two sets, e.g., with values {13, 25, 37, 45, 13} and {14, 25, 38, 8, 45} and call this function with them.
The manual way to do this:
```
def overlap(set1, set2):
set_overlap = []
for k in set1:
if k in set2:
set_overlap.append(k)
print (set_overlap)
set1 = {13, 25, 37, 45, 13}
set2 = {14, 25, 38, 8, 45}
overlap(set1, set2)
```
The better way, knowing about the methods the set provides:
```
def intersection(set1, set2):
print(set1.intersection(set2))
intersection(set1, set2)
```
## Exercise 3: Dictionaries
* Create a dictionary with two-letter codes of two US states and the full names, e.g., UT: Utah, NY: New York
* After initially creating the dictionary, add two more states to the dictionary.
* Create a second dictionary that maps the state codes to an array of cities in that state, e.g., UT: [Salt Lake City, Ogden, Provo, St. George].
* Write a function that takes a state code and prints the full name of the state and lists the cities in that state.
```
states = {"UT":"Utah", "NY":"New York"}
states["CA"] = "California"
states["MA"] = "Massachusetts"
cities = {"UT":["Salt Lake City", "Ogden", "Provo", "St. George"], "NY":["Albany", "New York City", "Buffalo"]}
def print_state_info(code):
if code in states and code in cities:
print (states[code] + ": "+ str(cities[code]))
else:
print ("No information for state code " + code)
print_state_info("UT")
print_state_info("NY")
print_state_info("WA")
```
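As a quick usage note (a small sketch reusing the `print_state_info` function above), dictionaries can also be iterated over directly, which prints the info for every state we added:
```
# Iterating over a dict yields its keys, i.e. the state codes
for code in states:
    print_state_info(code)
```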
## Exercise 5: Pandas Series
Create a new pandas series with the lists given below that contain NFL team names and the number of Super Bowl titles they won. Use the names as indices, the wins as the data.
* Once the list is created, sort the series alphabetically by index.
* Print an overview of the statistical properties of the series. What's the mean number of wins?
* Filter out all teams that have won less than four Super Bowl titles
* A football team has 45 players. Update the series so that instead of the number of titles, it reflects the number of Super Bowl rings given to the players.
* Assume that each ring costs USD 30,000. Update the series so that it contains a string of the dollar amount including the \$ sign. For the Steelers, for example, this would correspond to:
```
Pittsburgh Steelers $ 8100000
```
```
teams = ["New England Patriots",
"Pittsburgh Steelers",
"Dallas Cowboys",
"San Francisco 49ers",
"Green Bay Packers",
"New York Giants",
"Denver Broncos",
"Oakland/Los Angeles Raiders",
"Washington Redskins",
"Miami Dolphins",
"Baltimore/Indianapolis Colts",
"Baltimore Ravens"]
wins = [6,6,5,5,4,4,3,3,3,2,2,2]
import pandas as pd
winning_teams = pd.Series(wins, name="Superbowl-winning NFL Teams", index=teams)
winning_teams
winning_teams.sort_index()
winning_teams.describe()
winning_teams[winning_teams > 3]
rings = winning_teams * 45
rings
def to_dollar(rings):
return "$ " + str(rings * 30000)
rings.map(to_dollar)
```
|
github_jupyter
|
def overlap(set1, set2):
set_overlap = []
for k in set1:
if k in set2:
set_overlap.append(k)
print (set_overlap)
set1 = {13, 25, 37, 45, 13}
set2 = {14, 25, 38, 8, 45}
overlap(set1, set2)
def intersection(set1, set2):
print(set1.intersection(set2))
intersection(set1, set2)
states = {"UT":"Utah", "NY":"New York"}
states["CA"] = "California"
states["MA"] = "Massachusetts"
cities = {"UT":["Salt Lake City", "Ogden", "Provo", "St. George"], "NY":["Albany", "New York City", "Buffalo"]}
def print_state_info(code):
if code in states and code in cities:
print (states[code] + ": "+ str(cities[code]))
else:
print ("No information for state code " + code)
print_state_info("UT")
print_state_info("NY")
print_state_info("WA")
Pittsburgh Steelers $ 8100000
```
| 0.186132 | 0.973544 |
```
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.dates as mdates
from datetime import date
from datetime import datetime
import numpy as np
from summer.utils import ref_times_to_dti
from autumn.tools.inputs.demography.queries import get_population_by_agegroup
from autumn.models.sm_sir.detection import create_cdr_function
from autumn.tools.utils.utils import apply_moving_average
from autumn.tools.curve.scale_up import scale_up_function
from autumn.tools.project import get_project
from autumn.settings import Region, Models
from autumn.models.covid_19.constants import AGEGROUP_STRATA, GOOGLE_MOBILITY_LOCATIONS
from autumn.settings.constants import COVID_BASE_DATETIME
from autumn.models.sm_sir.mixing_matrix.macrodistancing import weight_mobility_data, get_mobility_data
from autumn.tools.plots.utils import REF_DATE
from autumn.tools import inputs
from autumn.tools.inputs.database import get_input_db
from autumn.tools.utils.display import pretty_print
from autumn.tools.inputs.social_mixing.build_synthetic_matrices import build_synthetic_matrices
from autumn.models.covid_19.detection import get_testing_numbers_for_region
plt.style.use('ggplot')
age_integers = [int(group) for group in AGEGROUP_STRATA]
model = Models.SM_SIR
region = Region.BHUTAN
project = get_project(model, region, reload=True)
base_params = project.param_set.baseline
```
# Population
```
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
age_groups = base_params["age_groups"]
total_pops = inputs.get_population_by_agegroup(
age_groups,
base_params["country"]["iso3"],
None,
# "FDMN",
year=base_params["population"]["year"]
)
print(f"total modelled population of {region} is: {round(sum(total_pops) / 1e3, 3)} thousand")
ax.bar(age_groups, total_pops, width=4)
ax.set_title(region)
ax.set_ylabel("population")
ax.set_xlabel("starting age of age bracket")
fig.suptitle("population distribution by age")
```
# Mobility
```
print(f"Whether mobility effects are turned on: {base_params['is_dynamic_mixing_matrix']}")
times
y_upper = 2.
# Collate data together
input_db = get_input_db()
mob_df = get_mobility_data("BTN", region=False,base_date = COVID_BASE_DATETIME )[0]
times = mob_df["date"].to_list()
google_mob_df = weight_mobility_data(mob_df, project.param_set.baseline["mobility"]["google_mobility_locations"])
# Get plots ready
mob_fig, mob_axes = plt.subplots(1, 2, figsize=(12, 6))
plot_left_date = date(2020, 1, 1)
plot_right_date = times[-1] # Not sure why this is necessary
# Plot raw mobility data
ax = mob_axes[0]
for mobility_domain in GOOGLE_MOBILITY_LOCATIONS:
ax.plot(times, mob_df[mobility_domain], label=mobility_domain)
ax.set_ylim((0., y_upper))
ax.tick_params(axis="x", labelrotation=45)
ax.set_title("raw Google mobility domains")
ax.legend(loc="upper right")
ax.set_xlim(left=plot_left_date, right=plot_right_date)
# Plot processed mobility data
ax = mob_axes[1]
for location in list(project.param_set.baseline["mobility"]["google_mobility_locations"].keys()):
ax.plot(times, google_mob_df[location], label=location)
ax.tick_params(axis="x", labelrotation=45)
ax.set_ylim((0., y_upper))
ax.legend(loc="upper left")
ax.set_title("mobility as implemented in the model")
mob_fig.tight_layout(w_pad=1.5, h_pad=3.5)
ax.set_xlim(left=plot_left_date, right=plot_right_date)
```
# Mixing matrix
```
print(f"Modelled country: {project.param_set.baseline['country']['iso3']}")
print(f"Modelled sub-region: {project.param_set.baseline['population']['region']}")
print(f"Proxy country: {project.param_set.baseline['ref_mixing_iso3']}")
print("Always age-adjusted under SM-SIR code")
agegroup_types = {
"base age groups": AGEGROUP_STRATA,
"modelled age groups": project.param_set.baseline["age_groups"],
}
for title, agegroups in agegroup_types.items():
mixing_matrix = build_synthetic_matrices(
project.param_set.baseline["country"]["iso3"],
project.param_set.baseline["ref_mixing_iso3"],
agegroups,
True,
project.param_set.baseline["population"]["region"]
)
fig = plt.figure(figsize=(12, 8))
positions = [1, 2, 3, 5, 6]
for i_loc, location in zip(positions, mixing_matrix.keys()):
ax = fig.add_subplot(2, 3, i_loc)
ax.imshow(
np.flipud(np.transpose(mixing_matrix[location])),
cmap=cm.hot,
vmin=0,
vmax=mixing_matrix[location].max(),
origin="lower"
)
ax.set_title(location.replace("_", " "))
ax.set_xticks([])
ax.set_yticks([])
fig.suptitle(title)
```
# Case detection
```
testing_params = project.param_set.baseline["testing_to_detection"]
print("Parameter values are: ")
pretty_print(testing_params)
# Get the CDR function of tests
cdr_from_tests_func = create_cdr_function(
testing_params["assumed_tests_parameter"],
testing_params["assumed_cdr_parameter"],
)
# Get the denominator population
testing_pops = get_population_by_agegroup(
project.param_set.baseline["age_groups"],
project.param_set.baseline["country"]["iso3"],
project.param_set.baseline["population"]["region"]
)
# Process the data
test_times, test_values = get_testing_numbers_for_region("BTN","Bhutan")
test_dates = ref_times_to_dti(COVID_BASE_DATETIME, [int(time) for time in test_times])
per_capita_tests = [i_tests / sum(testing_pops) for i_tests in test_values]
dummy_tests = np.linspace(0, max(per_capita_tests), 200)
if testing_params["assumed_tests_parameter"]:
smoothed_per_capita_tests = apply_moving_average(
per_capita_tests,
testing_params["smoothing_period"]
)
else:
smoothed_per_capita_tests = per_capita_tests
cdr_function_of_time = scale_up_function(
test_times,
[cdr_from_tests_func(test_rate) for test_rate in smoothed_per_capita_tests],
smoothness=0.2, method=4, bound_low=0.,
)
# Plot
fig, axes = plt.subplots(2, 2, figsize=(12, 8))
fig.tight_layout(w_pad=1.5, h_pad=5)
def sort_axis_dates(axis):
axis.tick_params(axis="x", labelrotation=45)
#axis.set_xlim(left=plot_left_date, right=plot_right_date)
# Plot daily number of tests
axis = axes[0, 0]
axis.plot(test_dates, test_values, marker="o")
axis.set_title("daily testing numbers")
sort_axis_dates(axis)
# Plot daily number of tests
axis = axes[0, 1]
axis.plot(test_dates, per_capita_tests, label="raw")
axis.plot(test_dates, smoothed_per_capita_tests, label="smoothed")
axis.set_title("daily per capita testing rate")
sort_axis_dates(axis)
axis.legend()
# Plot relationship of daily tests to CDR proportion
axis = axes[1, 0]
axis.plot(dummy_tests, cdr_from_tests_func(dummy_tests))
axis.scatter(per_capita_tests, [cdr_from_tests_func(i_tests) for i_tests in per_capita_tests], color="r")
axis.set_ylabel("case detection proportion")
axis.set_xlabel("per capita testing rate")
axis.set_title("daily per capita tests to CDR relationship")
axis.set_ylim(top=1.)
# Plot CDR values
axis = axes[1, 1]
multiplier = 1e2
axis.scatter(test_dates, [cdr_from_tests_func(i_test_rate) * multiplier for i_test_rate in smoothed_per_capita_tests], color="r")
axis.plot(test_dates, [cdr_function_of_time(time) * multiplier for time in test_times])
axis.set_title("Final case detection rate")
axis.set_ylabel("percentage")
sort_axis_dates(axis)
fig.tight_layout()
```
<a href="https://colab.research.google.com/github/lvisdd/object_detection_tutorial/blob/master/DeepLab_Demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# [Tensorflow Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection)
```
!git clone https://github.com/tensorflow/models.git
```
# COCO API installation
```
!git clone https://github.com/cocodataset/cocoapi.git
%cd cocoapi/PythonAPI
!make
!cp -r pycocotools /content/models/research/
```
# Protobuf Compilation
```
%cd /content/models/research/
!protoc object_detection/protos/*.proto --python_out=.
```
# Add Libraries to PYTHONPATH
```
%cd /content/models/research/
%env PYTHONPATH=/env/python:/content/models/research:/content/models/research/slim:/content/models/research/object_detection
%env
```
# Testing the Installation
```
!python object_detection/builders/model_builder_test.py
%cd /content/models/research/object_detection
```
# [DeepLab: Deep Labelling for Semantic Image Segmentation](https://github.com/tensorflow/models/tree/master/research/deeplab)
## Import Libraries
```
import os
from io import BytesIO
import tarfile
import tempfile
from six.moves import urllib
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
import tensorflow as tf
```
## Import helper methods
These methods help us perform the following tasks:
* Load the latest version of the pretrained DeepLab model
* Load the colormap from the PASCAL VOC dataset
* Add colors to various labels, such as "pink" for people, "green" for bicycles, and more
* Visualize an image, and add an overlay of colors on various regions
```
class DeepLabModel(object):
"""Class to load deeplab model and run inference."""
INPUT_TENSOR_NAME = 'ImageTensor:0'
OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'
INPUT_SIZE = 513
FROZEN_GRAPH_NAME = 'frozen_inference_graph'
def __init__(self, tarball_path):
"""Creates and loads pretrained deeplab model."""
self.graph = tf.Graph()
graph_def = None
# Extract frozen graph from tar archive.
tar_file = tarfile.open(tarball_path)
for tar_info in tar_file.getmembers():
if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):
file_handle = tar_file.extractfile(tar_info)
graph_def = tf.GraphDef.FromString(file_handle.read())
break
tar_file.close()
if graph_def is None:
raise RuntimeError('Cannot find inference graph in tar archive.')
with self.graph.as_default():
tf.import_graph_def(graph_def, name='')
self.sess = tf.Session(graph=self.graph)
def run(self, image):
"""Runs inference on a single image.
Args:
image: A PIL.Image object, raw input image.
Returns:
resized_image: RGB image resized from original input image.
seg_map: Segmentation map of `resized_image`.
"""
width, height = image.size
resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)
target_size = (int(resize_ratio * width), int(resize_ratio * height))
resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)
batch_seg_map = self.sess.run(
self.OUTPUT_TENSOR_NAME,
feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
seg_map = batch_seg_map[0]
return resized_image, seg_map
def create_pascal_label_colormap():
"""Creates a label colormap used in PASCAL VOC segmentation benchmark.
Returns:
A Colormap for visualizing segmentation results.
"""
colormap = np.zeros((256, 3), dtype=int)
ind = np.arange(256, dtype=int)
for shift in reversed(range(8)):
for channel in range(3):
colormap[:, channel] |= ((ind >> channel) & 1) << shift
ind >>= 3
return colormap
def label_to_color_image(label):
"""Adds color defined by the dataset colormap to the label.
Args:
label: A 2D array with integer type, storing the segmentation label.
Returns:
result: A 2D array with floating type. The element of the array
is the color indexed by the corresponding element in the input label
to the PASCAL color map.
Raises:
ValueError: If label is not of rank 2 or its value is larger than color
map maximum entry.
"""
if label.ndim != 2:
raise ValueError('Expect 2-D input label')
colormap = create_pascal_label_colormap()
if np.max(label) >= len(colormap):
raise ValueError('label value too large.')
return colormap[label]
def vis_segmentation(image, seg_map):
"""Visualizes input image, segmentation map and overlay view."""
plt.figure(figsize=(15, 5))
grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])
plt.subplot(grid_spec[0])
plt.imshow(image)
plt.axis('off')
plt.title('input image')
plt.subplot(grid_spec[1])
seg_image = label_to_color_image(seg_map).astype(np.uint8)
plt.imshow(seg_image)
plt.axis('off')
plt.title('segmentation map')
plt.subplot(grid_spec[2])
plt.imshow(image)
plt.imshow(seg_image, alpha=0.7)
plt.axis('off')
plt.title('segmentation overlay')
unique_labels = np.unique(seg_map)
ax = plt.subplot(grid_spec[3])
plt.imshow(
FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')
ax.yaxis.tick_right()
plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
plt.xticks([], [])
ax.tick_params(width=0.0)
plt.grid('off')
plt.show()
LABEL_NAMES = np.asarray([
'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv'
])
FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
```
## Select a pretrained model
We have trained the DeepLab model using various backbone networks. Select one from the MODEL_NAME list.
```
MODEL_NAME = 'mobilenetv2_coco_voctrainaug' # @param ['mobilenetv2_coco_voctrainaug', 'mobilenetv2_coco_voctrainval', 'xception_coco_voctrainaug', 'xception_coco_voctrainval']
_DOWNLOAD_URL_PREFIX = 'http://download.tensorflow.org/models/'
_MODEL_URLS = {
'mobilenetv2_coco_voctrainaug':
'deeplabv3_mnv2_pascal_train_aug_2018_01_29.tar.gz',
'mobilenetv2_coco_voctrainval':
'deeplabv3_mnv2_pascal_trainval_2018_01_29.tar.gz',
'xception_coco_voctrainaug':
'deeplabv3_pascal_train_aug_2018_01_04.tar.gz',
'xception_coco_voctrainval':
'deeplabv3_pascal_trainval_2018_01_04.tar.gz',
}
_TARBALL_NAME = 'deeplab_model.tar.gz'
model_dir = tempfile.mkdtemp()
tf.gfile.MakeDirs(model_dir)
download_path = os.path.join(model_dir, _TARBALL_NAME)
print('downloading model, this might take a while...')
urllib.request.urlretrieve(_DOWNLOAD_URL_PREFIX + _MODEL_URLS[MODEL_NAME],
download_path)
print('download completed! loading DeepLab model...')
MODEL = DeepLabModel(download_path)
print('model loaded successfully!')
```
## Run on sample images
Select one of the sample images (leave `IMAGE_URL` empty) or provide any internet image URL for inference.
Note that this colab uses single scale inference for fast computation,
so the results may slightly differ from the visualizations in the
[README](https://github.com/tensorflow/models/blob/master/research/deeplab/README.md) file,
which uses multi-scale and left-right flipped inputs.
```
SAMPLE_IMAGE = 'image1' # @param ['image1', 'image2', 'image3']
IMAGE_URL = '' #@param {type:"string"}
_SAMPLE_URL = ('https://github.com/tensorflow/models/blob/master/research/'
'deeplab/g3doc/img/%s.jpg?raw=true')
def run_visualization(url):
"""Inferences DeepLab model and visualizes result."""
try:
f = urllib.request.urlopen(url)
jpeg_str = f.read()
original_im = Image.open(BytesIO(jpeg_str))
except IOError:
print('Cannot retrieve image. Please check url: ' + url)
return
print('running deeplab on image %s...' % url)
resized_im, seg_map = MODEL.run(original_im)
vis_segmentation(resized_im, seg_map)
image_url = IMAGE_URL or _SAMPLE_URL % SAMPLE_IMAGE
run_visualization(image_url)
```
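The cell above pulls images over HTTP. To segment a local file instead, the same `MODEL.run` / `vis_segmentation` calls apply; a minimal sketch (the file name is a placeholder):

```
local_im = Image.open('my_photo.jpg')  # any local RGB image; the path is a placeholder
resized_im, seg_map = MODEL.run(local_im)
vis_segmentation(resized_im, seg_map)
```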
# Algorithm_note
The coin change problem
```
n = 1260
count = 0
coin_types = [500, 100, 50, 10]
for x in coin_types:
    count += n // x
    n %= x
    print('The current value of n is %d.' % n)
print(count)
```
Looking at the code above, the loop runs once per coin denomination, so if there are K denominations the time complexity is O(K).
Note that the amount to be returned, N, does not appear in the time complexity at all.
In other words, the running time depends only on the number of coin denominations, not on the size of the amount being returned.
## The 'Law of Large Numbers' problem (Greedy)
```
# The 'Law of Large Numbers' problem
n, m, k = map(int, input().split())
# n: how many numbers, m: total number of additions, k: how many times one number may be added consecutively
data = list(map(int, input().split()))
data.sort()
first = data[-1]
second = data[-2]
result = 0  # accumulates the final answer
while True:
    for x in range(k):  # add the largest number k times
        if m == 0:
            break
        result += first
        m -= 1
    if m == 0:
        break
    result += second  # then add the second-largest number once
    m -= 1
print(result)
```
For this problem it is enough to keep only the largest and the second-largest of the input values. Since a number may be added at most K times in a row, we can repeatedly add the largest number K times and then the second-largest number once.
However, while this approach works here because M is no more than 10,100, if M grew to something like 10 billion the loop above would exceed the time limit.
Let's solve the problem more efficiently with a simple mathematical idea.
For example, suppose N is 5, the largest and second-largest input values are 6 and 5, and K is 3.
To solve the problem, the first thing to understand is the repeating sequence.
When the largest and second-largest numbers are added, they are added in a fixed, repeating pattern; in this example the sequence {6, 6, 6, 5} repeats.
So how long is the repeating block?
It is (K+1), which is 4 in this example. Therefore the quotient of M divided by (K+1) is the number of times the block repeats, and multiplying that by K gives the number of times the largest number appears.
We also have to handle the case where M is not divisible by (K+1): the largest number is then added an extra M mod (K+1) times. In other words, the number of times the largest number is added is
int(M / (K+1)) * K + M % (K+1)
Using this formula we first compute how many times the largest number is added, and from that how many times the second-largest number is added. A quick numerical check and the resulting Python solution follow.
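For example, if M = 8 and K = 3 (M = 8 chosen only for illustration), the block length is 4, so the largest number is added int(8/4)*3 + 8%4 = 6 times:

```
m, k = 8, 3  # example values chosen for illustration
count = int(m / (k + 1)) * k + m % (k + 1)
print(count)  # 6
```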
```
n, m, k = map(int, input().split())
data = list(map(int, input().split()))
data.sort()
first = data[-1]
second = data[-2]
# Number of times the largest number is added
count = int(m / (k + 1)) * k
count += m % (k + 1)
result = 0
result += count * first
result += (m - count) * second
print(result)
```
## Number Card Game (Greedy)
Idea: find the smallest number in each row, then pick the largest among those row minimums.
```
# The highest value among the row minimums
m, n = map(int, input().split())
result = 0  # result must be initialised before it is compared below
for i in range(m):
    data = map(int, input().split())
    mini = min(data)
    result = max(result, mini)
print(result)
```
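The same greedy idea can be written more compactly with a generator expression; an equivalent sketch (same input convention, where the first number is the number of rows):

```
m, n = map(int, input().split())
print(max(min(map(int, input().split())) for _ in range(m)))
```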
## Until N Becomes 1
The allowed operations are:
1. Subtract 1 from N.
2. Divide N by K.
```
# My own attempt: accepted
n, k = map(int, input().split())
count = 0
while n != 1:
    if n % k == 0:
        n = n / k
        count += 1
    else:
        n = n - 1
        count += 1
print(count)
# Solution from the book
n, k = map(int, input().split())
result = 0
# While N is at least K, keep dividing by K
while n >= k:
    # If N is not divisible by K, subtract 1 until it is
    while n % k != 0:
        n -= 1
        result += 1
    # Divide by K
    n //= k
    result += 1
# Finally, subtract 1 from whatever remains until it reaches 1
while n > 1:
    n -= 1
    result += 1
print(result)
```
## Up, Down, Left, Right (Implementation / Simulation)
Because the character is moved step by step according to the given commands, this is a simulation-type problem.
```
# My attempt — not sure why, but it should print 3, 4 and instead printed 2, 4
n = int(input())
mov = list(map(str, input().split()))
init_1 = 1
init_2 = 1
for i in mov:
    if i == 'r':
        init_2 += 1
    elif i == 'l':
        init_2 -= 1
    elif i == 'u':
        init_1 -= 1
    else:
        init_1 += 1
print((init_1, init_2))
# Solution from the book
n = int(input())
x, y = 1, 1
plans = input().split()
move_type = ['L', 'R', 'U', 'D']
dx = [0, 0, -1, 1]
dy = [-1, 1, 0, 0]
for plan in plans:
    for i in range(len(move_type)):
        if plan == move_type[i]:
            nx = x + dx[i]
            ny = y + dy[i]
    if nx < 1 or ny < 1 or nx > n or ny > n:
        continue
    x, y = nx, ny
print(x, y)
```
## Clock (Brute-Force Search)
A brute-force (exhaustive) search is a method that checks every possible case.
Brute force generally has an inefficient time complexity, so it may not finish in time when the amount of data is large. As a rule of thumb, brute force is appropriate when the total number of cases to examine is at most about one million.
```
n = int(input())
count = 0
for i in range(n + 1):
    for j in range(60):
        for k in range(60):
            if '3' in str(i) + str(j) + str(k):  # convert to a string and look for the digit '3'
                count += 1
print(count)
```
## The Royal Knight
### I'm still weak at this type of problem — review it often.
```
int(ord('a')) - int(ord('a')) + 1  # quick check of the column conversion (evaluates to 1)
input_data = input()
row = int(input_data[1])
column = int(ord(input_data[0])) - int(ord('a')) + 1
steps = [(-2, -1), (-1, -2), (1, -2), (2, -1), (2, 1), (1, 2), (-1, 2), (-2, 1)]
result = 0
for step in steps:
    # The position the knight would move to
    next_row = row + step[0]
    next_columns = column + step[1]
    # If that position is still on the board, count the move
    if next_row >= 1 and next_row <= 8 and next_columns >= 1 and next_columns <= 8:
        result += 1
print(result)
# ord(c) returns the Unicode code point of the character c
```
### Game Development (Simulation)
A typical simulation problem, and a representative type that comes up frequently in Samsung Electronics' open-recruitment coding tests.
Rather than needing a special algorithm, it can be solved simply by implementing exactly what the problem asks, carefully and without mistakes.
It takes repeated practice to become fluent.
#### Technique: in problems where you move around by setting a direction, it is effective to define separate dx, dy lists that encode the directions, as illustrated below.
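A tiny standalone illustration of the pattern (north/east/south/west offsets, matching the solution that follows):

```
# north, east, south, west offsets
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]
x, y, direction = 2, 2, 1                      # currently at (2, 2), facing east
nx, ny = x + dx[direction], y + dy[direction]
print(nx, ny)                                  # 2 3 -> one step east
```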
```
n, m = map(int, input().split())
# Create a map that records visited positions, initialised to 0
d = [[0] * m for _ in range(n)]  # "_" is just a throwaway loop variable; it is never actually used
# Read the character's current x and y coordinates and facing direction
x, y, direction = map(int, input().split())
# north: 0, east: 1, south: 2, west: 3
d[x][y] = 1  # mark the current cell as visited (stored as 1 to distinguish it)
d
# n, m = map(int, input().split())
# Create the visited map again, initialised to 0 (this cell is independent of the one above)
d = [[0] * m for _ in range(n)]  # "_" is just a throwaway loop variable
# Read the character's current x and y coordinates and facing direction
x, y, direction = map(int, input().split())
# north: 0, east: 1, south: 2, west: 3
d[x][y] = 1  # mark the current cell as visited
# Read the full map
array = []
for i in range(n):
    array.append(list(map(int, input().split())))
# Direction offsets: north, east, south, west
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]
# Turn left (this is only a rotation, not a move)
def turn_left():
    global direction
    direction -= 1
    if direction == -1:  # wrapped around from 0 to -1
        direction = 3
# Start the simulation
count = 1
turn_time = 0
while True:
    # Turn left
    turn_left()
    nx = x + dx[direction]
    ny = y + dy[direction]
    # After turning, move forward if the cell ahead has not been visited yet
    # d checks whether we have been there; array checks sea vs land (0 means land, i.e. movable)
    if d[nx][ny] == 0 and array[nx][ny] == 0:
        d[nx][ny] = 1  # record that we have been here
        x = nx
        y = ny
        count += 1
        turn_time = 0
        continue
    else:
        turn_time += 1
    # If we cannot move in any of the four directions
    if turn_time == 4:
        nx = x - dx[direction]
        ny = y - dy[direction]
        # Step backwards if possible
        if array[nx][ny] == 0:
            x = nx
            y = ny
        # If the cell behind is blocked by sea
        else:
            break
        turn_time = 0
# Print the answer
print(count)
n, m = map(int, input().split())
x, y, direction = map(int, input().split())  # read into x and y, which are used throughout below
# Build the visited map with a list comprehension
# Used to check whether a cell has already been visited
d = [[0] * m for _ in range(n)]
d[x][y] = 1  # mark the current position as visited
# Read the full map
array = []
for i in range(n):
    array.append(list(map(int, input().split())))
dx = [-1, 0, 1, 0]
dy = [0, 1, 0, -1]
def turn_left():
    global direction
    direction -= 1
    if direction == -1:
        direction = 3
# Start the simulation
count = 1
turn_time = 0
while True:
    # Turn left
    turn_left()
    nx = x + dx[direction]
    ny = y + dy[direction]
    if d[nx][ny] == 0 and array[nx][ny] == 0:
        d[nx][ny] = 1
        x = nx
        y = ny
        count += 1
        turn_time = 0  # reset the turn counter
        continue
    # The cell ahead has already been visited or is sea
    else:
        turn_time += 1  # try another direction
    # If we cannot move in any of the four directions
    if turn_time == 4:
        nx = x - dx[direction]
        ny = y - dy[direction]
        # Step backwards if possible
        if array[nx][ny] == 0:
            x = nx
            y = ny
        else:
            break
        turn_time = 0
# Answer
print(count)
```
# License Plate Detection with OpenCV
In this project we demonstrate how to use OpenCV only, with traditional computer vision approaches, to perform License Plate Detection (LPD).
We follow two approaches:
1- __Morphology-based approach__: only morphological transforms are used, along with some rules, to detect the LP.
2- __Character-based approach__: in addition to the basic morphological transforms, simple character detection (also morphology-based) is used as an extra cue for the LP.
Further, the second approach can be extended to License Plate Recognition (LPR), i.e. recognizing the digits and characters written on the plate.
In both approaches, we load HD videos (1080p). Due to the camera position, this is the most effective resolution to detect LP patterns.
In both approaches we merge car detection, using background subtraction, to narrow the search space.
For more details, please see [full documentation](doc/DOC.md), and the executable example [notebook](doc/LPD.ipynb)
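The car-detection step mentioned above uses standard OpenCV background subtraction to narrow the search space. The exact parameters live in the repository code; a minimal sketch of the idea (the threshold and area values below are illustrative, not the project's API):

```
import cv2
import numpy as np

cap = cv2.VideoCapture("dat/detection_test.mp4")
subtractor = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=32)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    mask = subtractor.apply(frame)  # foreground (moving) pixels
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
    # OpenCV 4 signature; OpenCV 3 returns an extra value first
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cars = [c for c in contours if cv2.contourArea(c) > 5000]  # keep large moving blobs only
```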
# Pre-requisites
You need to install the packages in `requirements.txt`:
`pip install -r requirements.txt`
# How to run?
You can run over a video, or single image as follows:
## Video input
You can call the `process_video` as follows:
```
from video import process_video
from char import detect_LP
video_file = 'dat/detection_test.mp4'
video_output_file = 'dat/char_LP_detection.mp4'
process_video(video_file, video_output_file, detect_LP_fn=detect_LP)
```
Note that you can choose the approach by importing `detect_LP` from either `char` or `morpho`.
You can also do the same from the command line:
```
!python main.py --video_file dat/detection_test.mp4 --video_output_file dat/cars_detection.mp4 --detect_LP_fn 1
```

## Single image
You can use the `char.detect_LP` or `morpho.detect_LP`
```
import cv2
from utils import plot_img
from char import detect_LP_char
img = cv2.imread("imgs/char_frame_180_car_no_lp1.png")
plot_img(img)
detected_img, LPs = detect_LP_char(img)
plot_img(detected_img)
```
You can further debug, and calibrate the rules, via the internal functions `char.detect_LP_char` or `morpho.detect_LP_morpho`
```
import cv2
from utils import plot_img
from morpho import detect_LP_morpho
img = cv2.imread("imgs/char_frame_180_car_no_lp1.png")
plot_img(img)
detected_img, LPs = detect_LP_morpho(cv2.resize(img, (500,500)), L_min=35, L_max=60, W_min=55, W_max=90, debug=True)
plot_img(detected_img)
```
# References
- https://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/
- https://sod.pixlab.io/articles/license-plate-detection.html
- https://github.com/MicrocontrollersAndMore/OpenCV_3_License_Plate_Recognition_Python.git
```
%load_ext autoreload
%autoreload 2
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../notebooks_publications/')
import bluranalysis as analysis
# plt.style.use('deblur')
```
# DNF and SNR Bounds
For any illumination sequence with exactly $N$ pulses (or more generally, with total illumination $\sum_{i=1}^n v_i = N)$, we will have (assuming the illumination power of a single pulse is normalized to $1$) a signal-to-noise ratio (SNR) of
$$SNR = \frac{\bar{i}_0 N}{f \sqrt{\sigma_{\text{gray}}^2 + \sigma_d^2 N}}\:,$$
where $\bar{i}_0$ is the average intensity of the object, $\sigma_{\text{gray}}$ is the signal-independent part of the noise, and $\sigma_d$ is the weight on the signal-dependent part of the noise. We define the (normalized) deconvolution noise factor (DNF) $f$ as
$$f = \sqrt{\frac{1}{m} \sum_{i=1}^m \frac{\sigma_1^2}{\sigma_i^2}}\:,$$
where $\sigma_1,...,\sigma_m$ are the ordered singular values of the blur operation $A$. Since $A$ is a convolutional operator with kernel $b=\sum_{j=1}^n v_j \delta_j$, the singular values are given by the power spectrum $\{|(Fb)_i|^2\}_{i=1}^m$. The $v_i$ are illumination values and the $\delta_i$ are the positions.
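For concreteness, the DNF can be evaluated directly from this definition; a small helper (assuming `kernel` is the zero-padded pulse train $b$, so its DC power is $\sigma_1^2$):

```
# Direct evaluation of the (normalized) DNF from a pulse kernel.
def dnf(kernel):
    sigma_sq = np.abs(np.fft.fft(kernel))**2         # power spectrum = squared singular values
    return np.sqrt(np.mean(sigma_sq[0] / sigma_sq))  # sigma_1^2 is the DC term

strobed = np.zeros(100); strobed[0] = 1.0
print(dnf(strobed))  # a single pulse has a flat spectrum, so f = 1
```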
In this notebook, I discuss
1. a *lower bound* on the DNF, which translates to an *upper bound* on the SNR, i.e. the *best* we could ever do.
2. an *upper bound* on the DNF, which translates into a *lower bound* on the SNR, i.e. the *worst* we can expect to do. This is somewhat less important to our story.
# Lower Bound on DNF
Lemma: We have that
$ f^2 \geq \frac{N^2}{\frac{1}{m}\sum_{i=1}^m \sigma_i^2}$.
*Proof*: Starting with $f^2 = \sigma_1^2\cdot \frac{1}{m} \sum_{i=1}^m \frac{1}{\sigma_i^2}$, note that $\frac{1}{m} \sum_{i=1}^m \frac{1}{\sigma_i^2}$ is the reciporocal of the *harmonic mean* of $\{\sigma_1^2,...,\sigma_m^2\}$. Since the harmonic mean is always less than the *arithmetic mean* [1], we have that
$$ \frac{1}{m} \sum_{i=1}^m \frac{1}{\sigma_i^2}\geq \frac{1}{\frac{1}{m}\sum_{i=1}^m \sigma_i^2}\:. $$
Then the result follows by the observation that $\sigma_1$ is the DC component of the signal, which is $\sum_{i=1}^n v_i = N$.
[1] e.g. https://sites.math.washington.edu/~dumitriu/Inequalities
## From Power Spectrum Average to Illumination Power (Parseval's)
Lemma: We have the following relationship between average squared singular value and illumination values, $\frac{1}{m}\sum_{i=1}^m \sigma_i^2 = \sum_{j=1}^n v_j^2$.
*Proof*: Since the singular values are given by the vector $Fb = F\sum_{j=1}^n v_j \delta_j$, the average squared singular values is given by the inner product
$$\frac{1}{m} \sum_{i=1}^m \sigma_i^2 = \frac{1}{m} (Fb)^H Fb = \frac{1}{m} \sum_{j=1}^n \sum_{\ell=1}^n v_jv_\ell \delta_j^H F^H F\delta_\ell = \sum_{j=1}^n v_j^2 $$
where the final simplification comes from noting that $F^H F = mI$ (since we use un-normalized DFT matrices) and $\delta_j^H\delta_\ell = \mathbf{1}\{j=\ell\}$.
TODO: should we be more careful about DFT scaling in various definitions?
```
# A numerical validation of the above lemma
def check_equiv(n_pulses, kernel_length, padding_size=0, generation='binary'):
kernel = np.zeros(kernel_length+padding_size)
if generation == 'binary':
indicies = np.random.choice(kernel_length, size=n_pulses, replace=False)
kernel[indicies] = 1.0
else:
kernel[0:kernel_length] = np.random.uniform(0,1, size=kernel_length)
x_fft = np.fft.fft(kernel)
sigma_x = np.abs(x_fft)**2
return np.abs(np.sum(kernel**2)-np.mean(sigma_x))
for n_pulses in range(10,100):
kernel_length = 200; padding_size = kernel_length
assert check_equiv(n_pulses, kernel_length, padding_size, 'binary') < 1e-10
assert check_equiv(n_pulses, kernel_length, padding_size, 'uniform') < 1e-10
print("All assertions passed")
```
# Upper Bound on SNR
Proposition: For $\sum_{i=1}^n v_i = N$, we have that $f\geq\sqrt{N}$, i.e. the DNF grows at a rate of at least $\sqrt{N}$.
Corollary: For any fixed $N$, the best achievable SNR is
$$SNR \leq \frac{\bar{i}_0 \sqrt{N}}{\sqrt{\sigma_{\text{gray}}^2 + \sigma_d^2 N}}\:.$$
*Proof*: Using the previous two lemmas, we have that $ f^2 \geq \frac{N^2}{\sum_{j=1}^n v_j^2}$. Next, notice the fact that
$$\max_{v\in[0,1]^n} \sum_{j=1}^nv_j^2~:~ \sum_{j=1}^n v_j=N$$
is achieved for binary $v$ and has the maximum value $N$ (for integer valued $N$). This gives $f^2\geq N$ which yields the result.
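A quick numerical sanity check of the bound (assuming no exactly-zero spectral values for the sampled kernels):

```
# Check f >= sqrt(N) on random binary pulse sequences.
rng = np.random.default_rng(0)
for _ in range(100):
    N = int(rng.integers(1, 50))
    kernel = np.zeros(200)
    kernel[rng.choice(200, size=N, replace=False)] = 1.0
    sigma_sq = np.abs(np.fft.fft(kernel))**2
    f = np.sqrt(np.mean(sigma_sq[0] / sigma_sq))
    assert f >= np.sqrt(N) - 1e-9
print("f >= sqrt(N) held in all 100 trials")
```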
### Importance of $\sigma_\text{gray}$
The above corollary shows an *upper bound* on SNR that increases with ${N}$.
However, if $\sigma_{\text{gray}}^2\ll \sigma_d^2 $, the SNR will not increase with $N$, and in fact its maximum value,
$$SNR \leq \frac{\bar{i}_0 \sqrt{N}}{\sqrt{\sigma_d^2 N}} = \frac{\bar{i}_0}{\sigma_d}$$
is achieved by strobed illumination (i.e. $N=1$). In other words, when $\sigma_\text{gray}$ is small, strobed will be optimal.
If $\sigma_\text{gray}$ is larger, then the upper bound suggests that the SNR will improve with ${N}$. However, since it is only an upper bound, it is not sufficient. To actually show that there are settings under which SNR improves with ${N}$, we need to show a *lower bound* that increases with $N$.
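To see how the read-noise floor changes the picture, the upper bound can be plotted directly; a quick sketch with the arbitrary normalization $\bar{i}_0 = 1$, $\sigma_d = 1$:

```
# Upper-bound SNR vs N for a few gray-noise levels.
plt.figure()
N = np.arange(1, 101)
for sigma_gray in [0.0, 5.0, 20.0]:
    plt.plot(N, np.sqrt(N) / np.sqrt(sigma_gray**2 + N), label=f"sigma_gray = {sigma_gray}")
plt.xlabel("N")
plt.ylabel("SNR upper bound")
plt.legend()
plt.show()
```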
# Upper Bound on DNF
Lemma: We have the bound
$$ f^2 \leq \frac{1}{m} + \frac{m-1}{m}\frac{N^2}{\sigma_m^2}\:.$$
*Proof*: Starting with $f^2 = \frac{1}{m} \sum_{i=1}^m \frac{\sigma_1^2}{\sigma_i^2}$, note that $\frac{\sigma_1^2}{\sigma_i^2} \leq \frac{\sigma_1^2}{\sigma_m^2}$ for all $i$.
Keeping the $i=1$ term (which equals $1$) and plugging in this bound for the remaining $m-1$ terms $i=2,\dots,m$, we see that
$$f^2 \leq \frac{1}{m} + \frac{1}{m}\frac{N^2}{\sigma_m^2}\cdot (m-1)\:,$$
where we use that $\sigma_1$ is the DC component of the signal.
# Thoughts on Lower Bound on SNR
To make statement about a lower bound on SNR, we need to have an upper bound on $f$, which requires a lower bound on $\sigma_m^2$ (as per the above lemma). Of course, one lower bound would be zero, but this gives a vacuous result.
If we could show that $\sigma_m^2\geq c N^{2p}$ for some $0<p<1$, we could show that $f\leq CN^{1-p}$ (for some $C$), which would mean $SNR \geq \frac{\bar{i}_0 N^{p}}{C \sqrt{\sigma_{\text{gray}}^2 + \sigma_d^2 N}}$. This lower bound shows that there is a regime (where $N$ is small enough) in which the SNR increases with $N$.
## Inference for ResNet 50 using ONNX Runtime
This example demonstrates how to load an image classification model from the [ONNX model zoo](https://github.com/onnx/models) and confirm its accuracy based on included test data.
```
import numpy as np # we're going to use numpy to process input and output data
import onnxruntime # to inference ONNX models, we use the ONNX Runtime
import onnx
from onnx import numpy_helper
import urllib.request
import json
import time
# display images in notebook
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
%matplotlib inline
onnx_model_url = "https://s3.amazonaws.com/onnx-model-zoo/resnet/resnet50v2/resnet50v2.tar.gz"
imagenet_labels_url = "https://raw.githubusercontent.com/anishathalye/imagenet-simple-labels/master/imagenet-simple-labels.json"
# retrieve our model from the ONNX model zoo
urllib.request.urlretrieve(onnx_model_url, filename="resnet50v2.tar.gz")
urllib.request.urlretrieve(imagenet_labels_url, filename="imagenet-simple-labels.json")
!tar xvzf resnet50v2.tar.gz --warning=no-unknown-keyword
```
### Load sample inputs and outputs
```
test_data_dir = 'resnet50v2/test_data_set'
test_data_num = 3
import glob
import os
# Load inputs
inputs = []
for i in range(test_data_num):
input_file = os.path.join(test_data_dir + '_{}'.format(i), 'input_0.pb')
tensor = onnx.TensorProto()
with open(input_file, 'rb') as f:
tensor.ParseFromString(f.read())
inputs.append(numpy_helper.to_array(tensor))
print('Loaded {} inputs successfully.'.format(test_data_num))
# Load reference outputs
ref_outputs = []
for i in range(test_data_num):
output_file = os.path.join(test_data_dir + '_{}'.format(i), 'output_0.pb')
tensor = onnx.TensorProto()
with open(output_file, 'rb') as f:
tensor.ParseFromString(f.read())
ref_outputs.append(numpy_helper.to_array(tensor))
print('Loaded {} reference outputs successfully.'.format(test_data_num))
```
### Inference using ONNX Runtime
```
# Run the model on the backend
session = onnxruntime.InferenceSession('resnet50v2/resnet50v2.onnx', None)
# get the name of the first input of the model
input_name = session.get_inputs()[0].name
print('Input Name:', input_name)
%%time
outputs = [session.run([], {input_name: inputs[i]})[0] for i in range(test_data_num)]
print('Predicted {} results.'.format(len(outputs)))
# Compare the results with reference outputs up to 4 decimal places
for ref_o, o in zip(ref_outputs, outputs):
np.testing.assert_almost_equal(ref_o, o, 4)
print('ONNX Runtime outputs are similar to reference outputs!')
```
### Classify sample images using our inference session
**Input**: A 224 x 224 pixel image that contains an object
**Task**: Identify the content of input images
**Output**: Class prediction for input image
```
def load_labels(path):
with open(path) as f:
data = json.load(f)
return np.asarray(data)
def preprocess(input_data):
# convert the input data into the float32 input
img_data = input_data.astype('float32')
#normalize
mean_vec = np.array([0.485, 0.456, 0.406])
stddev_vec = np.array([0.229, 0.224, 0.225])
norm_img_data = np.zeros(img_data.shape).astype('float32')
for i in range(img_data.shape[0]):
norm_img_data[i,:,:] = (img_data[i,:,:]/255 - mean_vec[i]) / stddev_vec[i]
#add batch channel
norm_img_data = norm_img_data.reshape(1, 3, 224, 224).astype('float32')
return norm_img_data
def softmax(x):
x = x.reshape(-1)
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
def postprocess(result):
return softmax(np.array(result)).tolist()
labels = load_labels('imagenet-simple-labels.json')
image = Image.open('images/dog.jpg')
# image = Image.open('images/plane.jpg')
print("Image size: ", image.size)
plt.axis('off')
display_image = plt.imshow(image)
image_data = np.array(image).transpose(2, 0, 1)
input_data = preprocess(image_data)
start = time.time()
raw_result = session.run([], {input_name: input_data})
end = time.time()
res = postprocess(raw_result)
inference_time = np.round((end - start) * 1000, 2)
idx = np.argmax(res)
print('========================================')
print('Final top prediction is: ' + labels[idx])
print('========================================')
print('========================================')
print('Inference time: ' + str(inference_time) + " ms")
print('========================================')
sort_idx = np.flip(np.squeeze(np.argsort(res)))
print('============ Top 5 labels are: ============================')
print(labels[sort_idx[:5]])
print('===========================================================')
plt.axis('off')
display_image = plt.imshow(image)
```
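Note that `preprocess` above expects a 224 x 224 x 3 image; for arbitrary images you can simply resize first. A minimal sketch reusing the objects already defined (the path is one of the sample images):

```
img = Image.open('images/dog.jpg').convert('RGB').resize((224, 224))
input_data = preprocess(np.array(img).transpose(2, 0, 1))
raw_result = session.run([], {input_name: input_data})
print('Top prediction:', labels[np.argmax(postprocess(raw_result))])
```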
# Run-Length Encoding
Run-length encoding is a simple method for compressing data that contains long sequences of repeated characters.
In this compression algorithm:
1. A standalone character will be unchanged. E.g `"a"` $\rightarrow$ `["a"]`.
2. A run of a character, `c`, repeated `N` times will be compressed to `["c", "c", N]`. E.g. `"bbbb"` $\rightarrow$ `['b', 'b', 4]`.
These two rules are all that you need to perform run-length encoding.
Let's look at a few examples of run-length-encoding:
- `"abcd"` $\rightarrow$ `['a', 'b', 'c', 'd']`
- `"abbbba"` $\rightarrow$ `['a', 'b', 'b', 4, 'a']`
- `"aaaabbcccd"` $\rightarrow$ `['a', 'a', 4, 'b', 'b', 2, 'c', 'c', 3, 'd']`
- `""` $\rightarrow$ `[]`
- `"1"` $\rightarrow$ `["1"]`
The decompression algorithm, run-length decoding, simply reverses this process:
- `['q', 'a', 'a', 4, 'b', 'b', 2, 'c', 'c', 3, 'd']` $\rightarrow$ `'qaaaabbcccd'`
Here, you will implement run-length encoding and decoding algorithms. As indicated above, the encoding algorithm should accept a string and return a list with the appropriate string/integer entries, according to the encoding. The decoding algorithm needs to accept a list containing an encoded sequence and return the decoded string.
You should be able to test both of your algorithms by feeding them into one another:
```python
>>> decoder(encoder("Wooooow!!!!! I'm totally getting compressed"))
"Wooooow!!!!! I'm totally getting compressed"
```
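For reference, the encoding rules map naturally onto `itertools.groupby`; a compact sketch of an equivalent encoder (not the graded solution you are asked to write yourself below):

```
from itertools import groupby

def rle_reference(s):
    out = []
    for ch, grp in groupby(s):
        n = len(list(grp))
        out.extend([ch] if n == 1 else [ch, ch, n])
    return out
```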
```
# make sure to execute this cell so that your function is defined
# you must re-run this cell any time you make a change to this function
def run_length_encoder(in_string):
string = in_string
if string == '':
rle = []
return rle
i = 0
count = 0
letter = string[i]
rle = []
while i <= len(string) - 1:
while string[i] == letter:
i += 1
count +=1
if i > len(string) - 1:
break
if count == 1:
rle.append(letter)
else:
rle.append(letter)
rle.append(letter)
rle.append(count)
if i > len(string) - 1:
break
letter = string[i]
count = 0
return rle
# Execute this cell to grade your work
from bwsi_grader.python.run_length_encoding import encoder_grader
encoder_grader(run_length_encoder)
# make sure to execute this cell so that your function is defined
# you must re-run this cell any time you make a change to this function
def run_length_decoder(in_list):
if in_list == []:
s = ''
return s
rld = ""
for c in in_list:
if isinstance(c, str):
rld += c
        else:
            # the encoder already wrote the run's character twice, so append c - 2 more copies
            index = len(rld) - 1
            for i in range(c - 2):
                rld += rld[index]
return rld
# Execute this cell to grade your work
from bwsi_grader.python.run_length_encoding import decoder_grader
decoder_grader(run_length_decoder)
```
# HashTag Recommendation based on user's post
Importing required libraries
```
import pandas as pd
import seaborn as sns
%matplotlib inline
import numpy as np
from gensim.models import Word2Vec
import warnings
warnings.filterwarnings('ignore')
from nltk.tokenize import TweetTokenizer
import glob
import nltk
import string
import re
import pprint
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
```
Reading all the csv files to create a corpus
```
allFiles = glob.glob("Data/*/*.csv", recursive=True)
frame = pd.DataFrame()
# To store filenames
filenames = []
for file_ in allFiles:
df = pd.read_csv(file_, encoding = "ISO-8859-1")
filenames.append(file_.split("\\")[-1:][0].split(".")[0])
# print(df.head())
frame = pd.concat([frame,df[df.columns[1]]])
print(allFiles)
```
Converting the corpus to lower case and stripping the byte-string prefixes
```
def preprocessing(frame):
frame = frame.apply(lambda x: x.astype(str).str.lower())
frame = frame.apply(lambda x: x.astype(str).str.replace('b\'',''))
frame = frame.apply(lambda x: x.astype(str).str.replace('b\"',''))
return frame
frame = preprocessing(frame)
print(frame.head())
```
Function to build the stop-word list used to filter the corpus
```
def build_stopwords():
stop_words = (stopwords.words('english'))
list_remove = ['.','/','b\'','b\"','xe2','x80','xa6','x99t','x99s','x87','xb4','xaa','x9a','x9b','x93','xb2','x8c','x9c','xb5','xab']
stop_words = stop_words + word_tokenize(string.punctuation) + list_remove
return stop_words
# print(stop_words)
```
Removing stop words and building a corpus
```
corpus = []
tknzr = TweetTokenizer ()
stop_words = build_stopwords()
for row in frame[frame.columns[0]]:
# print (row)
# tweet = []
# tweet.append( )
# print (tweet)
# Removing urls which can be noise
row = re.sub(r'https?:\/\/.*[\r\n]*', '', row)
word_tokens = tknzr.tokenize(row)
# Removing stop words
filtered_sentence = [w for w in word_tokens if not w in stop_words]
corpus.append(filtered_sentence)
print (corpus[:10])
```
Building a word2vec model
```
import timeit
start_time = timeit.default_timer()
model = Word2Vec(corpus, size=1000, window=3, min_count=1, workers=4)
elapsed = timeit.default_timer() - start_time
print(elapsed)
```
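Training can take a while on a large corpus, so it may be worth persisting the fitted model and reloading it in later sessions; a small sketch (the file name is arbitrary):

```
model.save("hashtag_w2v.model")
# later, or in another session:
# model = Word2Vec.load("hashtag_w2v.model")
```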
To validate the results from the model
```
print('Number of words in vocab = ' + str(len(model.wv.vocab)))
# print('Similarity between woman and man = ' + str(model.wv.similarity('man', 'trump')))
#
# pprint.pprint(model.most_similar("support"))
```
Input by user to recommend hashtags
```
input = 'man utd victory fa cup cup win tottenham football'
# input = 'tonight is the book reading sunday holiday night'
''' This function takes input post from the user and suggests hashtags which he can most likely relate his post with
It recommends hashtags based on current trending topics and post content,
not just with the meaning of words but also the
context of the words is taken into consideration
'''
def getHashTagSuggestion(input):
# Tokenizing the input
input = word_tokenize(input)
# Getting a list of hashtags from the data collected
file_inputs = {}
for file in filenames:
file = '#' + file.lower()
file_inputs[file.lower()] = file
    # Iterating to find the similarity between each word of the input and every hashtag
scores_out = []
for out in file_inputs.keys():
list_out = []
if(out in model.wv.vocab):
for word in input:
# print(word)
# print(out)
if(word in model.wv.vocab):
# pprint.pprint(word)
list_out.append(model.similarity(word,out))
# print(model.similarity(word,out))
if(len(list_out) > 0):
scores_out.append((out,sum(list_out) / float(len(list_out))))
# Returns sorted order of hashtag suggestion score
scores_out = sorted(scores_out, key=lambda tup: tup[1],reverse = True)
return scores_out
# file_inputs inside getHashTagSuggestion is local to the function, so rebuild
# the hashtag mapping here before inspecting it (and for use in the next cell)
file_inputs = {'#' + f.lower(): '#' + f.lower() for f in filenames}
print(file_inputs)
model.similarity('utd','#facup')
# scores_out = []
```
Displaying top HashTags related to user's post
```
HashTagSuggestion = []
suggestions = getHashTagSuggestion(input)
for out_tuple in suggestions[:5]:
HashTagSuggestion.append(file_inputs[out_tuple[0]])
pprint.pprint (HashTagSuggestion)
```
Defining a HashTag - these words define what a particular HashTag means
```
defining_words = model.most_similar("#book",topn = 10)
pprint.pprint ([x[0] for x in defining_words[:10]])
print(model.wv.vocab)
```
The MIT License (MIT)
Copyright (c) 2016-2017 Rohan Jahagirdar, Sai Nikhil Dogiparty
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
github_jupyter
|
import pandas as pd
import seaborn as sns
%matplotlib inline
import numpy as np
from gensim.models import Word2Vec
import warnings
warnings.filterwarnings('ignore')
from nltk.tokenize import TweetTokenizer
import glob
import nltk
import string
import re
import pprint
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
allFiles = glob.glob("Data/*/*.csv", recursive=True)
frame = pd.DataFrame()
# To store filenames
filenames = []
for file_ in allFiles:
df = pd.read_csv(file_, encoding = "ISO-8859-1")
filenames.append(file_.split("\\")[-1:][0].split(".")[0])
# print(df.head())
frame = pd.concat([frame,df[df.columns[1]]])
print(allFiles)
def preprocessing(frame):
frame = frame.apply(lambda x: x.astype(str).str.lower())
frame = frame.apply(lambda x: x.astype(str).str.replace('b\'',''))
frame = frame.apply(lambda x: x.astype(str).str.replace('b\"',''))
return frame
frame = preprocessing(frame)
print(frame.head())
def build_stopwords():
stop_words = (stopwords.words('english'))
list_remove = ['.','/','b\'','b\"','xe2','x80','xa6','x99t','x99s','x87','xb4','xaa','x9a','x9b','x93','xb2','x8c','x9c','xb5','xab']
stop_words = stop_words + word_tokenize(string.punctuation) + list_remove
return stop_words
# print(stop_words)
corpus = []
tknzr = TweetTokenizer ()
stop_words = build_stopwords()
for row in frame[frame.columns[0]]:
# print (row)
# tweet = []
# tweet.append( )
# print (tweet)
# Removing urls which can be noise
row = re.sub(r'https?:\/\/.*[\r\n]*', '', row)
word_tokens = tknzr.tokenize(row)
# Removing stop words
filtered_sentence = [w for w in word_tokens if not w in stop_words]
corpus.append(filtered_sentence)
print (corpus[:10])
import timeit
start_time = timeit.default_timer()
model = Word2Vec(corpus, size=1000, window=3, min_count=1, workers=4)
elapsed = timeit.default_timer() - start_time
print(elapsed)
print('Number of words in vocab = ' + str(len(model.wv.vocab)))
# print('Similarity between woman and man = ' + str(model.wv.similarity('man', 'trump')))
#
# pprint.pprint(model.most_similar("support"))
input = 'man utd victory fa cup cup win tottenham football'
# input = 'tonight is the book reading sunday holiday night'
''' This function takes input post from the user and suggests hashtags which he can most likely relate his post with
It recommends hashtags based on current trending topics and post content,
not just with the meaning of words but also the
context of the words is taken into consideration
'''
def getHashTagSuggestion(input):
# Tokenizing the input
input = word_tokenize(input)
# Getting a list of hashtags from the data collected
file_inputs = {}
for file in filenames:
file = '#' + file.lower()
file_inputs[file.lower()] = file
# Iterating to find similaity between each words of input and all hashtags
scores_out = []
for out in file_inputs.keys():
list_out = []
if(out in model.wv.vocab):
for word in input:
# print(word)
# print(out)
if(word in model.wv.vocab):
# pprint.pprint(word)
list_out.append(model.similarity(word,out))
# print(model.similarity(word,out))
if(len(list_out) > 0):
scores_out.append((out,sum(list_out) / float(len(list_out))))
# Returns sorted order of hashtag suggestion score
scores_out = sorted(scores_out, key=lambda tup: tup[1],reverse = True)
return scores_out
print (file_inputs)
model.similarity('utd','#facup')
# scores_out = []
HashTagSuggestion = []
suggestions = getHashTagSuggestion(input)
for out_tuple in suggestions[:5]:
HashTagSuggestion.append(file_inputs[out_tuple[0]])
pprint.pprint (HashTagSuggestion)
defining_words = model.most_similar("#book",topn = 10)
pprint.pprint ([x[0] for x in defining_words[:10]])
print(model.wv.vocab)
| 0.189296 | 0.584271 |
## Web Scraping
```
import requests
from bs4 import BeautifulSoup
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import requests
import datetime
content = requests.get(
"http://web.mta.info/developers/turnstile.html").content.decode("utf-8")
# Convert the main page contents to soup
soup = BeautifulSoup(content, 'lxml')
# WEB SCRAPER
content = requests.get(
"http://web.mta.info/developers/turnstile.html").content.decode("utf-8")
# Convert the main page contents to soup
soup = BeautifulSoup(content, 'lxml')
LOCATION = 'C:/Users/Harpreet Gaur/Desktop/Turnstile_Analysis' # ex: '/home/tom/Documents/turnstile/Data/'
a_tags = soup.select('div.span-84.last > a')
date_list = []
files_names = []
# looping through all the <a> tag elements
for a_tag in a_tags:
# Convert the inner text of the a tag to a datetime object
d = datetime.datetime.strptime(a_tag.text.strip(), "%A, %B %d, %Y")
# put the formatted date in the array if it is in the year of 2013
if d.year == 2013:
date_list.append(d.strftime("%y%m%d")) # append + format the datetime object to string
# loop the date list
for dl in date_list:
# get the file link from the date
url = "http://web.mta.info/developers/data/nyct/turnstile/turnstile_{}.txt".format(dl)
# Getting the file's content
print("Processing URL: " + url)
content = requests.get(url).content.decode("utf-8")
# Save the content in to a csv file
print("Converting to CSV...")
# Save to file in the data folder
f = open("./turnstile_{}.csv".format(dl), "w")
# Replace the \r escape character that's adding an extra line between each record
f.write(content.replace("\r", ""))
f.close()
print("Finished " + dl + "!\n")
#All
files_names = ['turnstile_130105.csv', 'turnstile_130112.csv', 'turnstile_130119.csv' , 'turnstile_130126.csv' ,
'turnstile_130202.csv' , 'turnstile_130209.csv' ,'turnstile_130216.csv' ,'turnstile_130223.csv' ,'turnstile_130302.csv' ,
'turnstile_130309.csv' ,'turnstile_130316.csv' ,'turnstile_130323.csv' ,'turnstile_130330.csv' ,'turnstile_130406.csv' ,
'turnstile_130413.csv' ,'turnstile_130420.csv' ,'turnstile_130427.csv' ,'turnstile_130504.csv' ,'turnstile_130511.csv' ,
'turnstile_130518.csv' ,'turnstile_130525.csv' ,'turnstile_130601.csv' ,'turnstile_130608.csv' ,'turnstile_130615.csv' ,
'turnstile_130622.csv' ,'turnstile_130629.csv' ,'turnstile_130706.csv' ,'turnstile_130713.csv' ,'turnstile_130720.csv' ,
'turnstile_130727.csv' ,'turnstile_130803.csv' ,'turnstile_130810.csv' ,'turnstile_130817.csv' ,'turnstile_130824.csv' ,
'turnstile_130831.csv' ,'turnstile_130907.csv' ,'turnstile_130914.csv' ,'turnstile_130921.csv' ,'turnstile_130928.csv' ,
'turnstile_131005.csv' ,'turnstile_131012.csv' ,'turnstile_131019.csv' ,'turnstile_131026.csv' ,
'turnstile_131102.csv' ,'turnstile_131109.csv' ,'turnstile_131116.csv' ,'turnstile_131123.csv' ,
'turnstile_131130.csv' ,'turnstile_131207.csv' ,'turnstile_131214.csv' ,'turnstile_131221.csv' ,
'turnstile_131228.csv'
]
columns = ['C/A','UNIT','SCP','DATE1','TIME1','DESC1','ENTRIES1','EXITS1','DATE2','TIME2','DESC2','ENTRIES2','EXITS2','DATE3','TIME3','DESC3','ENTRIES3','EXITS3','DATE4','TIME4','DESC4','ENTRIES4','EXITS4','DATE5','TIME5','DESC5','ENTRIES5','EXITS5','DATE6','TIME6','DESC6','ENTRIES6','EXITS6','DATE7','TIME7','DESC7','ENTRIES7','EXITS7','DATE8','TIME8','DESC8','ENTRIES8','EXITS8']
```
## Further Data-Cleaning
```
station_loc = LOCATION+'/Remote-Booth-Station.csv' # location of the Remote-Booth-Station.csv file in local machine
station_csv = pd.read_csv(station_loc)
LOCATION = 'C:/Users/Harpreet Gaur/Desktop/Turnstile_Analysis/'
def assign_station(df, station_csv):
station=[]
station_csv = station_csv.dropna()
for i in range(len(df)):
unit = df.UNIT.values[i]
ca = df['C/A'].values[i]
a = station_csv[(station_csv['Remote']==unit) & (station_csv['Booth']==ca)]
if not a.empty:
station.append(a.Station.values[0])
else:
station.append(np.NaN)
df['Station'] = station
return df
def remove_recovr_aud():
for i in range(len(files_names)):
data = pd.read_csv(LOCATION + files_names[i], names=columns)
data_fnames = assign_station(data, station_csv)
data_fname_new = data_fnames[(data_fnames['DESC1'] != 'RECOVR AUD') & (data_fnames['DESC2'] != 'RECOVR AUD') & (data_fnames['DESC3'] != 'RECOVR AUD')
& (data_fnames['DESC4'] != 'RECOVR AUD') & (data_fnames['DESC5'] != 'RECOVR AUD') & (data_fnames['DESC6'] != 'RECOVR AUD') & (data_fnames['DESC7'] != 'RECOVR AUD')
& (data_fnames['DESC8'] != 'RECOVR AUD')]
data_fname_new.to_csv(LOCATION + files_names[i])
remove_recovr_aud()
```
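The row-by-row lookup in assign_station works, but the same join can be expressed as a single vectorised merge, which is typically much faster on the weekly files. A minimal sketch of that alternative, assuming the same column names (Booth, Remote, Station in Remote-Booth-Station.csv; C/A, UNIT in the turnstile data):
```
# Sketch of a vectorised alternative to assign_station (same column names assumed)
def assign_station_merge(df, station_csv):
    # Keep one station per (Booth, Remote) pair, mirroring the first-match
    # behaviour of the row-by-row version
    lookup = station_csv.dropna()[['Booth', 'Remote', 'Station']].drop_duplicates(['Booth', 'Remote'])
    merged = df.merge(lookup, how='left',
                      left_on=['C/A', 'UNIT'], right_on=['Booth', 'Remote'])
    # Rows with no match keep NaN in the Station column, as before
    return merged.drop(columns=['Booth', 'Remote'])
```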
## Which station has the most number of units?
```
def station_list():
asd_ = pd.concat([pd.read_csv(LOCATION + files_names[i]) for i in range(len(files_names))], ignore_index=True)
asd = asd_.drop(columns='Unnamed: 0')
sum_ = asd.groupby(['Station']).UNIT.unique()
stations = list(sum_.keys())
unique = []
for i in range(len(sum_)):
unique.append(len(sum_[i]))
return stations[unique.index(max(unique))]
print('The station that has the most number of units is: ' + str(station_list()))
```
## What is the total number of entries & exits across the subway system for February 1, 2013?
```
feb = pd.read_csv(LOCATION + 'turnstile_130202.csv')
def exits(df, day):
exits_ = []
station_ = []
stations = list(df['Station'].unique())
hours = ['0'+str(i)+':00:00' for i in range(0, 4)]
offset = []
for j in range(len(stations)):
df_temp = df[df['Station'] == stations[j]]
exits_stat = []
for i in range(2, len(df_temp)-1):
k = np.nan
for k_0 in range(1,9):
                for m in range(len(hours)): # This loop is needed because not every Feb 1 record starts at the same hour; records start between 00:00:00 and 04:00:00, so we loop over these values to find the right one in the row.
if df_temp['DATE%s' % k_0].values[i] == day and df_temp['TIME%s' % k_0].values[i] == hours[m]:
k = k_0
break
if not np.isnan(k):
if k>1:
# Here we start in the first hour of Feb 1, and sum the next values until we get to another date.
kf = range(k+1, 9)
ki = range(1, k+1)
lst = [df['EXITS%s' % l][i] for l in kf if df['EXITS%s' % l][i]!=0] + [df['EXITS%s' % l][i+1] for l in ki if df['EXITS%s' % l][i+1]!=0 or df['EXITS%s' % l][i]!=np.nan]
                    exits_stat.append(sum(lst) - len(lst)*df['EXITS%s' % (k-1)][i]) # The '- len(lst)*df['EXITS%s' % (k-1)][i]' is to subtract the cumulative part.
offset.append(df['EXITS%s' % (k-1)][i])
break
elif k == 1:
lst = [df['EXITS%s' % l][i] for l in range(1, 9) if df['EXITS%s' % l][i]!=0 or df['EXITS%s' % l][i]!=np.nan]
exits_stat.append(sum(lst) - len(lst)*df['EXITS8'][i-1])
offset.append(df['EXITS8'][i])
break
exits_.append(sum(exits_stat))
station_.append(stations[j])
exits = []
station = []
for i in range(len(exits_)):
if abs(exits_[i]) < 10000000:
exits.append(exits_[i])
station.append(station_[i])
return exits, offset, station
# The 'entries' function is the same as the 'exit' but changing EXITS%s --> ENTRIES%s
def entries(df, day):
entries_ = []
station_ = []
stations = list(df['Station'].unique())
hours = ['0'+str(i)+':00:00' for i in range(0, 4)]
offset = []
for j in range(len(stations)):
df_temp = df[df['Station'] == stations[j]]
entries_stat = []
indxs = list(df_temp.index)
for i in range(len(indxs)-1):
k = np.nan
for k_0 in range(1,9):
for m in range(len(hours)):
if df_temp.loc[indxs[i], 'DATE%s' % k_0] == day and df_temp.loc[indxs[i], 'TIME%s' % k_0] == hours[m]:
k = k_0
break
if not np.isnan(k):
if k>1:
kf = range(k+1, 9)
ki = range(1, k+1)
lst = [df_temp.loc[indxs[i], 'ENTRIES%s' % l] for l in kf if df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=0] + [df_temp.loc[indxs[i+1], 'ENTRIES%s' % l] for l in ki if df_temp.loc[indxs[i+1], 'ENTRIES%s' % l]!=0 or df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=np.nan]
entries_stat.append(sum(lst) - len(lst)*df_temp.loc[indxs[i], 'ENTRIES%s' % (k-1)])
offset.append(df_temp.loc[indxs[i], 'ENTRIES%s' % (k-1)])
break
elif k == 1:
lst = [df_temp.loc[indxs[i], 'ENTRIES%s' % l] for l in range(1, 9) if df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=0 or df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=np.nan]
entries_stat.append(sum(lst) - len(lst)*df_temp.loc[indxs[i-1], 'ENTRIES8'])
offset.append(df_temp.loc[indxs[i], 'ENTRIES8'])
break
entries_.append(sum(entries_stat))
station_.append(stations[j])
entries = []
station = []
for i in range(len(entries_)):
if abs(entries_[i]) < 10000000:
entries.append(entries_[i])
station.append(station_[i])
return entries, offset, station
entries_01, entries_31, stations_ent = entries(feb, '02-01-13')
exits_01, exits_31, stations_ex = exits(feb, '02-01-13')
print('Total number of entries for February 1: ' + str(sum(entries_01)))
print('Total number of exits for February 1: ' + str(sum(exits_01)))
```
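The key step in the two functions above is turning cumulative register readings into per-interval counts by subtracting the previous reading. A toy illustration of that idea with made-up counter values:
```
# Made-up cumulative ENTRIES readings from a single turnstile register
readings = [1000, 1030, 1075, 1120]
# Traffic in each interval is the difference between consecutive readings
per_interval = [readings[i] - readings[i - 1] for i in range(1, len(readings))]
print(per_interval)       # [30, 45, 45]
print(sum(per_interval))  # 120 entries over the three intervals
```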
## Let’s define busy-ness as the sum of the entry & exit counts. What station was the busiest on February 1, 2013? What turnstile was the busiest on that date?
```
business = [entries_01[i] + exits_01[i] for i in range(len(entries_01))]
ind = business.index(max(business))
print('The busiest station on 1 Feb was: ' + str(stations_ent[ind]))
feb = pd.read_csv(LOCATION + 'turnstile_130202.csv')
def exits(df, day):
exits_ = []
station_ = []
stations = list(df['SCP'].unique())
hours = ['0'+str(i)+':00:00' for i in range(0, 4)]
offset = []
for j in range(len(stations)):
df_temp = df[df['SCP'] == stations[j]]
exits_stat = []
for i in range(2, len(df_temp)-1):
k = np.nan
for k_0 in range(1,9):
for m in range(len(hours)):
if df_temp['DATE%s' % k_0].values[i] == day and df_temp['TIME%s' % k_0].values[i] == hours[m]:
k = k_0
break
if not np.isnan(k):
if k>1:
kf = range(k+1, 9)
ki = range(1, k+1)
lst = [df['EXITS%s' % l][i] for l in kf if df['EXITS%s' % l][i]!=0] + [df['EXITS%s' % l][i+1] for l in ki if df['EXITS%s' % l][i+1]!=0 or df['EXITS%s' % l][i]!=np.nan]
exits_stat.append(sum(lst) - len(lst)*df['EXITS%s' % (k-1)][i])
offset.append(df['EXITS%s' % (k-1)][i])
break
elif k == 1:
lst = [df['EXITS%s' % l][i] for l in range(1, 9) if df['EXITS%s' % l][i]!=0 or df['EXITS%s' % l][i]!=np.nan]
exits_stat.append(sum(lst) - len(lst)*df['EXITS8'][i-1])
offset.append(df['EXITS8'][i])
break
exits_.append(sum(exits_stat))
station_.append(stations[j])
exits = []
station = []
for i in range(len(exits_)):
if abs(exits_[i]) < 10000000:
exits.append(exits_[i])
station.append(station_[i])
return exits, offset, station
def entries(df, day):
entries_ = []
station_ = []
stations = list(df['SCP'].unique())
hours = ['0'+str(i)+':00:00' for i in range(0, 4)]
offset = []
for j in range(len(stations)):
df_temp = df[df['SCP'] == stations[j]]
entries_stat = []
indxs = list(df_temp.index)
for i in range(len(indxs)-1):
k = np.nan
for k_0 in range(1,9):
for m in range(len(hours)):
if df_temp.loc[indxs[i], 'DATE%s' % k_0] == day and df_temp.loc[indxs[i], 'TIME%s' % k_0] == hours[m]:
k = k_0
break
if not np.isnan(k):
if k>1:
kf = range(k+1, 9)
ki = range(1, k+1)
lst = [df_temp.loc[indxs[i], 'ENTRIES%s' % l] for l in kf if df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=0] + [df_temp.loc[indxs[i+1], 'ENTRIES%s' % l] for l in ki if df_temp.loc[indxs[i+1], 'ENTRIES%s' % l]!=0 or df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=np.nan]
entries_stat.append(sum(lst) - len(lst)*df_temp.loc[indxs[i], 'ENTRIES%s' % (k-1)])
offset.append(df_temp.loc[indxs[i], 'ENTRIES%s' % (k-1)])
break
elif k == 1:
lst = [df_temp.loc[indxs[i], 'ENTRIES%s' % l] for l in range(1, 9) if df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=0 or df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=np.nan]
entries_stat.append(sum(lst) - len(lst)*df_temp.loc[indxs[i-1], 'ENTRIES8'])
offset.append(df_temp.loc[indxs[i], 'ENTRIES8'])
break
entries_.append(sum(entries_stat))
station_.append(stations[j])
entries = []
station = []
for i in range(len(entries_)):
if abs(entries_[i]) < 10000000:
entries.append(entries_[i])
station.append(station_[i])
return entries, offset, station
entries_01, entries_31, stations_ent = entries(feb, '02-01-13')
exits_01, exits_31, stations_ex = exits(feb, '02-01-13')
business = [entries_01[i] + exits_01[i] for i in range(len(entries_01))]
ind = business.index(max(business))
print('The busiest turnstile on 1 Feb was: ' + str(stations_ent[ind]))
```
### Useful functions to be used later on
```
def total_entries(data):
df = data.copy()
indexs = list(df.index)
for i in range(len(indexs)):
for k in range(1,9): # Iterate over the columns
if not np.isnan(df.loc[indexs[i], 'ENTRIES%s' % (k)]): # If the corresponding cell is not empty:
if k>1: # If it's not the first cell:
                    point = df.at[indexs[i], 'ENTRIES%s' % (k)] - df.at[indexs[i], 'ENTRIES%s' % (k-1)] # subtract the cumulative part
                    if point > 0 and point < 100000: # If the subtraction is not an outlier:
df.at[indexs[i], 'ENTRIES%s' % (k)] = point # Replace the cell by the value without the cumulative part
else: # If it was an outlier, replace it by 0
df.at[indexs[i], 'ENTRIES%s' % (k)] = 0
elif k == 1 and indexs[i] == indexs[i-1]+1:
try:
point = df.at[indexs[i], 'ENTRIES1'] - df.at[indexs[i-1], 'ENTRIES8']
if point < 100000 and point > 0 :
df.at[indexs[i], 'ENTRIES1'] = point
else:
df.at[indexs[i], 'ENTRIES1'] = 0
except KeyError:
continue
return df
# The same as the function before, but with the exits
def total_exits(data):
df = data.copy()
indexs = list(df.index)
for i in range(len(indexs)):
for k in range(1,9):
if not np.isnan(df.loc[indexs[i], 'ENTRIES%s' % (k)]):
if k>1:
point = df.at[indexs[i], 'EXITS%s' % (k)] - df.at[indexs[i], 'EXITS%s' % (k-1)]
if point > 0 and point < 100000:
df.at[indexs[i], 'EXITS%s' % (k)] = point
else:
df.at[indexs[i], 'EXITS%s' % (k)] = 0
elif k == 1 and indexs[i] == indexs[i-1]+1 :
try:
point = df.at[indexs[i], 'EXITS1'] - df.at[indexs[i-1], 'EXITS8']
if point < 100000 and point > 0 :
df.at[indexs[i], 'EXITS1'] = point
else:
df.at[indexs[i], 'EXITS1'] = 0
except KeyError:
continue
return df
def filter_(df): # This is a filter for the outliers. My criterion was that if a value (typically of entries or exits) was bigger than 1000000, then I would replace it by the next value.
for i in range(len(df)):
if abs(df[i]) > 1000000:
df[i] = df[i+1]
return df
```
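As a quick sanity check of the outlier filter, here is a toy run of filter_ on made-up daily totals; any value whose magnitude exceeds 1,000,000 is replaced by the value that follows it.
```
import pandas as pd

# Made-up daily totals with one corrupted reading
toy_totals = pd.Series([120000, 98000, 45000000, 110000, 105000])
print(filter_(toy_totals).tolist())  # [120000, 98000, 110000, 110000, 105000]
```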
## What stations have seen the most usage growth/decline in 2013?
We define a station's usage growth (decline) as the ratio between its mean daily busy-ness at the start of 2013 (the first three recorded days) and at the end of 2013 (three of the last recorded days).
If that ratio is greater than 5, the station is counted among those with the most usage growth.
If the inverse ratio is greater than 2, the station is counted among those with the most usage decline.
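A small worked example of the ratio criterion used below, with invented daily busy-ness totals for a single station:
```
import numpy as np

# Invented daily busy-ness totals for one station over the year
daily_busyness = [1500, 1600, 1550, 800, 400, 250, 220, 200]
initial = np.mean(daily_busyness[0:3])    # mean of the first three days  -> 1550.0
final = np.mean(daily_busyness[-4:-1])    # mean of three of the last days -> 290.0
print(initial / final, final / initial)   # the two ratios compared against 5 and 2 below
```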
```
def usage_station():
data_ = pd.concat([pd.read_csv(LOCATION + files_names[i] ) for i in range(len(files_names))], ignore_index=True)
data_ = data_.drop(columns='Unnamed: 0')
stations = list(data_['Station'].unique())
business = []
for j in range(len(stations)):
data = data_[data_.Station == stations[j]]
data = total_entries(data)
data = total_exits(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
suma_ex = data.groupby(['DATE1']).EXITS1.sum() + data.groupby(['DATE1']).EXITS2.sum() + data.groupby(['DATE1']).EXITS3.sum() + data.groupby(['DATE1']).EXITS4.sum() + data.groupby(['DATE1']).EXITS5.sum() + data.groupby(['DATE1']).EXITS6.sum() + data.groupby(['DATE1']).EXITS7.sum() + data.groupby(['DATE1']).EXITS8.sum()
business.append([suma_ent[i] + suma_ex[i] for i in range(len(suma_ent))])
return business, stations
business, stations = usage_station()
growth = []
decline = []
for i in range(len(business)):
if len(business[i]) > 7:
initial = np.mean(business[i][0:3])
final = np.mean(business[i][-4:-1])
if initial/final > 5 and final != 0:
growth.append(stations[i])
elif final/initial > 2 and initial != 0:
decline.append(stations[i])
print('/////')
print('The stations that have seen the most usage growth in 2013 are: ' + str(growth))
print('/////')
print('The stations that have seen the most usage decline in 2013 are: ' + str(decline))
print('/////')
```
## What dates are the least busy? Could you identify days on which stations were not operating at full capacity or closed entirely?
### Busy days
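The criterion used in busy_days below flags a day as least busy when its system-wide total falls more than one standard deviation below the mean. A toy illustration with invented totals:
```
import numpy as np

# Invented system-wide daily totals; only a clear dip falls below the threshold
daily = np.array([5.0e6, 5.2e6, 4.9e6, 5.1e6, 2.0e6])
threshold = daily.mean() - daily.std()
print(daily[daily < threshold])  # -> [2000000.]
```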
```
def busy_days():
data = pd.concat([pd.read_csv(LOCATION + files_names[i]) for i in range(len(files_names))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = total_entries(data)
data = total_exits(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
suma_ex = data.groupby(['DATE1']).EXITS1.sum() + data.groupby(['DATE1']).EXITS2.sum() + data.groupby(['DATE1']).EXITS3.sum() + data.groupby(['DATE1']).EXITS4.sum() + data.groupby(['DATE1']).EXITS5.sum() + data.groupby(['DATE1']).EXITS6.sum() + data.groupby(['DATE1']).EXITS7.sum() + data.groupby(['DATE1']).EXITS8.sum()
business = [suma_ent[i] + suma_ex[i] for i in range(len(suma_ent))]
mean = np.mean(business)
std = np.std(business)
least_use = []
for i in range(len(business)):
if business[i] < mean - std:
least_use.append(suma_ent.keys()[i])
return print('Least busy days: ' + str(least_use))
def closed_days():
data = pd.concat([pd.read_csv(LOCATION + files_names[i]) for i in range(len(files_names))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
stations = list(data.Station.unique())
for j in range(len(stations)):
data = data[data.Station == stations[j]]
data = total_entries(data)
data = total_exits(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
suma_ex = data.groupby(['DATE1']).EXITS1.sum() + data.groupby(['DATE1']).EXITS2.sum() + data.groupby(['DATE1']).EXITS3.sum() + data.groupby(['DATE1']).EXITS4.sum() + data.groupby(['DATE1']).EXITS5.sum() + data.groupby(['DATE1']).EXITS6.sum() + data.groupby(['DATE1']).EXITS7.sum() + data.groupby(['DATE1']).EXITS8.sum()
business = [suma_ent[i] + suma_ex[i] for i in range(len(suma_ent))]
mean = np.mean(business)
least_use = []
cero_use = []
for i in range(len(business)):
if business[i] < 0.05*mean:
least_use.append(suma_ent.keys()[i])
if business[i] == 0.0:
cero_use.append(suma_ent.keys()[i])
return print('Station not operating at full capacity days: ' + str(least_use)), print('Station closed entirely days: ' + str(set(cero_use)))
print('/////')
busy_days()
print('/////')
```
### Closed Days
```
def closed_days():
data = pd.concat([pd.read_csv(LOCATION + files_names[i]) for i in range(len(files_names))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
stations = list(data.Station.unique())
least_use = []
cero_use = []
for j in range(len(stations)):
        # Work on a per-station copy so the full DataFrame is not overwritten between iterations
        data_station = data[data.Station == stations[j]]
        data_station = total_entries(data_station)
        data_station = total_exits(data_station)
        suma_ent = data_station.groupby(['DATE1']).ENTRIES1.sum() + data_station.groupby(['DATE1']).ENTRIES2.sum() + data_station.groupby(['DATE1']).ENTRIES3.sum() + data_station.groupby(['DATE1']).ENTRIES4.sum() + data_station.groupby(['DATE1']).ENTRIES5.sum() + data_station.groupby(['DATE1']).ENTRIES6.sum() + data_station.groupby(['DATE1']).ENTRIES7.sum() + data_station.groupby(['DATE1']).ENTRIES8.sum()
        suma_ex = data_station.groupby(['DATE1']).EXITS1.sum() + data_station.groupby(['DATE1']).EXITS2.sum() + data_station.groupby(['DATE1']).EXITS3.sum() + data_station.groupby(['DATE1']).EXITS4.sum() + data_station.groupby(['DATE1']).EXITS5.sum() + data_station.groupby(['DATE1']).EXITS6.sum() + data_station.groupby(['DATE1']).EXITS7.sum() + data_station.groupby(['DATE1']).EXITS8.sum()
business = [suma_ent[i] + suma_ex[i] for i in range(len(suma_ent))]
mean = np.mean(business)
for i in range(len(business)):
if business[i] < 0.1*mean:
least_use.append(suma_ent.keys()[i])
if business[i] == 0.0:
cero_use.append(suma_ent.keys()[i])
return print('Station not operating at full capacity days: ' + str(least_use)), print('\n') , print('Station closed entirely days: ' + str(set(cero_use)))
print('/////')
closed_days()
print('/////')
```
## Plots
### Daily row count
```
plt.rcParams['figure.dpi'] = 100
Q_1 = list(reversed(['turnstile_130330.csv', 'turnstile_130323.csv' , 'turnstile_130316.csv'
,'turnstile_130309.csv' , 'turnstile_130302.csv', 'turnstile_130223.csv'
, 'turnstile_130216.csv' , 'turnstile_130209.csv' , 'turnstile_130202.csv'
, 'turnstile_130126.csv' , 'turnstile_130119.csv' , 'turnstile_130112.csv', 'turnstile_130105.csv']))
def plot_1():
data = pd.concat([pd.read_csv(LOCATION + Q_1[i] ) for i in range(len(Q_1))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = total_entries(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
plt.figure(1)
suma_ent.plot(label='Total riders')
plt.xlabel('Dates')
plt.ylabel('Counts')
plt.legend()
plt.grid()
plot_1()
```
### Plot the daily total number of entries & exits across the system for Q1 2013.
```
Q_1 = list(reversed(['turnstile_130330.csv', 'turnstile_130323.csv' , 'turnstile_130316.csv'
,'turnstile_130309.csv' , 'turnstile_130302.csv', 'turnstile_130223.csv'
, 'turnstile_130216.csv' , 'turnstile_130209.csv' , 'turnstile_130202.csv'
, 'turnstile_130126.csv' , 'turnstile_130119.csv' , 'turnstile_130112.csv', 'turnstile_130105.csv']))
def plot_2():
data = pd.concat([pd.read_csv(LOCATION + Q_1[i]) for i in range(len(Q_1))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = total_entries(data)
data = total_exits(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
suma_ex = data.groupby(['DATE1']).EXITS1.sum() + data.groupby(['DATE1']).EXITS2.sum() + data.groupby(['DATE1']).EXITS3.sum() + data.groupby(['DATE1']).EXITS4.sum() + data.groupby(['DATE1']).EXITS5.sum() + data.groupby(['DATE1']).EXITS6.sum() + data.groupby(['DATE1']).EXITS7.sum() + data.groupby(['DATE1']).EXITS8.sum()
plt.figure(1)
suma_ent.plot(label='Total entries')
suma_ex.plot(label='Total exits')
plt.xlabel('Dates')
plt.ylabel('Counts')
plt.legend()
plt.grid()
plot_2()
```
### Plot the mean and standard deviation of the daily total number of entries & exits for each month in Q1 2013 for station 34 ST-PENN STA.
```
Q_1_3 = list(reversed(['turnstile_130330.csv', 'turnstile_130323.csv' , 'turnstile_130316.csv'
,'turnstile_130309.csv' , 'turnstile_130302.csv']))
Q_1_2 = list(reversed(['turnstile_130223.csv'
, 'turnstile_130216.csv' , 'turnstile_130209.csv' , 'turnstile_130202.csv']))
Q_1_1 = list(reversed(['turnstile_130126.csv' , 'turnstile_130119.csv' , 'turnstile_130112.csv', 'turnstile_130105.csv',]))
plt.rcParams['figure.dpi'] = 100
def plot_3():
lst = [Q_1_1, Q_1_2, Q_1_3]
lst_str = ['Jan', 'Feb', 'March']
plt.figure(1)
for j in range(len(lst)):
        data = pd.concat([pd.read_csv(LOCATION + lst[j][i], parse_dates=['DATE%s' % k for k in range(1, 9)], infer_datetime_format=True) for i in range(len(lst[j]))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = data[data['Station'] == '34 ST-PENN STA']
data = total_entries(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
suma_ent = filter_(suma_ent)
plt.plot(j, np.mean(suma_ent.values), 'ro')
plt.errorbar(j, np.mean(suma_ent.values), yerr=np.std(suma_ent.values), ecolor='r')
plt.xticks(range(len(lst)), lst_str)
plt.xlabel('Dates')
plt.ylabel('Counts')
plt.legend(['Total entries'])
plt.grid()
plt.title('Entries')
plt.figure(2)
for j in range(len(lst)):
        data = pd.concat([pd.read_csv(LOCATION + lst[j][i], parse_dates=['DATE%s' % k for k in range(1, 9)], infer_datetime_format=True) for i in range(len(lst[j]))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = data[data['Station'] == '34 ST-PENN STA']
data = total_exits(data)
suma_ex = data.groupby(['DATE1']).EXITS1.sum() + data.groupby(['DATE1']).EXITS2.sum() + data.groupby(['DATE1']).EXITS3.sum() + data.groupby(['DATE1']).EXITS4.sum() + data.groupby(['DATE1']).EXITS5.sum() + data.groupby(['DATE1']).EXITS6.sum() + data.groupby(['DATE1']).EXITS7.sum() + data.groupby(['DATE1']).EXITS8.sum()
suma_ex = filter_(suma_ex)
plt.plot(j, np.mean(suma_ex.values), 'go')
        plt.errorbar(j, np.mean(suma_ex.values), yerr=np.std(suma_ex.values), ecolor='g')
plt.xticks(range(len(lst)), lst_str)
plt.xlabel('Dates')
plt.ylabel('Counts')
plt.legend(['Total exits'])
plt.grid()
plt.title('Exits')
plot_3()
```
### Plot 25/50/75 percentile of the daily total number of entries & exits for each month in Q1 2013 for station 34 ST-PENN STA.
```
import matplotlib.patches as mpatches
Q_1_3 = list(reversed(['turnstile_130330.csv', 'turnstile_130323.csv' , 'turnstile_130316.csv'
,'turnstile_130309.csv' , 'turnstile_130302.csv']))
Q_1_2 = list(reversed(['turnstile_130223.csv'
, 'turnstile_130216.csv' , 'turnstile_130209.csv' , 'turnstile_130202.csv']))
Q_1_1 = list(reversed(['turnstile_130126.csv' , 'turnstile_130119.csv' , 'turnstile_130112.csv', 'turnstile_130105.csv',]))
percents = [25, 50, 75]
def plot_4():
lst = [Q_1_1, Q_1_2, Q_1_3]
lst_str = ['Jan', 'Feb', 'March']
plt.figure(1)
for j in range(len(lst)):
        data = pd.concat([pd.read_csv(LOCATION + lst[j][i], parse_dates=['DATE%s' % k for k in range(1, 9)], infer_datetime_format=True) for i in range(len(lst[j]))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = data[data['Station'] == '34 ST-PENN STA']
data = total_entries(data)
data = total_exits(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
suma_ent = filter_(suma_ent)
plt.plot(j, np.percentile(suma_ent.values, 25), 'ro')
plt.plot(j, np.percentile(suma_ent.values, 50), 'go')
plt.plot(j, np.percentile(suma_ent.values, 75), 'bo')
plt.xticks(range(len(lst)), lst_str)
plt.xlabel('Dates')
plt.ylabel('Percentiles')
red_patch = mpatches.Patch(color='red', label='Total entries - 25')
green_patch = mpatches.Patch(color='green', label='Total entries - 50')
blue_patch = mpatches.Patch(color='blue', label='Total entries - 75')
plt.legend(handles=[red_patch, green_patch, blue_patch])
plt.title('Entries')
plt.grid()
plt.figure(2)
for j in range(len(lst)):
        data = pd.concat([pd.read_csv(LOCATION + lst[j][i], parse_dates=['DATE%s' % k for k in range(1, 9)], infer_datetime_format=True) for i in range(len(lst[j]))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = data[data['Station'] == '34 ST-PENN STA']
data = total_entries(data)
data = total_exits(data)
suma_ex = data.groupby(['DATE1']).EXITS1.sum() + data.groupby(['DATE1']).EXITS2.sum() + data.groupby(['DATE1']).EXITS3.sum() + data.groupby(['DATE1']).EXITS4.sum() + data.groupby(['DATE1']).EXITS5.sum() + data.groupby(['DATE1']).EXITS6.sum() + data.groupby(['DATE1']).EXITS7.sum() + data.groupby(['DATE1']).EXITS8.sum()
suma_ex = filter_(suma_ex)
plt.plot(j, np.percentile(suma_ex.values, 25), 'ro')
plt.plot(j, np.percentile(suma_ex.values, 50), 'go')
plt.plot(j, np.percentile(suma_ex.values, 75), 'bo')
plt.xticks(range(len(lst)), lst_str)
plt.xlabel('Dates')
plt.ylabel('Percentiles')
        red_patch = mpatches.Patch(color='red', label='Total exits - 25')
        green_patch = mpatches.Patch(color='green', label='Total exits - 50')
        blue_patch = mpatches.Patch(color='blue', label='Total exits - 75')
plt.legend(handles=[red_patch, green_patch, blue_patch])
plt.title('Exits')
plt.grid()
plot_4()
```
|
github_jupyter
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import requests
import datetime
content = requests.get(
"http://web.mta.info/developers/turnstile.html").content.decode("utf-8")
# Convert the main page contents to soup
soup = BeautifulSoup(content, 'lxml')
# WEB SCRAPER
content = requests.get(
"http://web.mta.info/developers/turnstile.html").content.decode("utf-8")
# Convert the main page contents to soup
soup = BeautifulSoup(content, 'lxml')
LOCATION = 'C:/Users/Harpreet Gaur/Desktop/Turnstile_Analysis' # ex: '/home/tom/Documents/turnstile/Data/'
a_tags = soup.select('div.span-84.last > a')
date_list = []
files_names = []
# looping through all the <a> tag elements
for a_tag in a_tags:
# Convert the inner text of the a tag to a datetime object
d = datetime.datetime.strptime(a_tag.text.strip(), "%A, %B %d, %Y")
# put the formatted date in the array if it is in the year of 2013
if d.year == 2013:
date_list.append(d.strftime("%y%m%d")) # append + format the datetime object to string
# loop the date list
for dl in date_list:
# get the file link from the date
url = "http://web.mta.info/developers/data/nyct/turnstile/turnstile_{}.txt".format(dl)
# Getting the file's content
print("Processing URL: " + url)
content = requests.get(url).content.decode("utf-8")
# Save the content in to a csv file
print("Converting to CSV...")
# Save to file in the data folder
f = open("./turnstile_{}.csv".format(dl), "w")
# Replace the \r escape character that's adding an extra line between each record
f.write(content.replace("\r", ""))
f.close()
print("Finished " + dl + "!\n")
#All
files_names = ['turnstile_130105.csv', 'turnstile_130112.csv', 'turnstile_130119.csv' , 'turnstile_130126.csv' ,
'turnstile_130202.csv' , 'turnstile_130209.csv' ,'turnstile_130216.csv' ,'turnstile_130223.csv' ,'turnstile_130302.csv' ,
'turnstile_130309.csv' ,'turnstile_130316.csv' ,'turnstile_130323.csv' ,'turnstile_130330.csv' ,'turnstile_130406.csv' ,
'turnstile_130413.csv' ,'turnstile_130420.csv' ,'turnstile_130427.csv' ,'turnstile_130504.csv' ,'turnstile_130511.csv' ,
'turnstile_130518.csv' ,'turnstile_130525.csv' ,'turnstile_130601.csv' ,'turnstile_130608.csv' ,'turnstile_130615.csv' ,
'turnstile_130622.csv' ,'turnstile_130629.csv' ,'turnstile_130706.csv' ,'turnstile_130713.csv' ,'turnstile_130720.csv' ,
'turnstile_130727.csv' ,'turnstile_130803.csv' ,'turnstile_130810.csv' ,'turnstile_130817.csv' ,'turnstile_130824.csv' ,
'turnstile_130831.csv' ,'turnstile_130907.csv' ,'turnstile_130914.csv' ,'turnstile_130921.csv' ,'turnstile_130928.csv' ,
'turnstile_131005.csv' ,'turnstile_131012.csv' ,'turnstile_131019.csv' ,'turnstile_131026.csv' ,
'turnstile_131102.csv' ,'turnstile_131109.csv' ,'turnstile_131116.csv' ,'turnstile_131123.csv' ,
'turnstile_131130.csv' ,'turnstile_131207.csv' ,'turnstile_131214.csv' ,'turnstile_131221.csv' ,
'turnstile_131228.csv'
]
columns = ['C/A','UNIT','SCP','DATE1','TIME1','DESC1','ENTRIES1','EXITS1','DATE2','TIME2','DESC2','ENTRIES2','EXITS2','DATE3','TIME3','DESC3','ENTRIES3','EXITS3','DATE4','TIME4','DESC4','ENTRIES4','EXITS4','DATE5','TIME5','DESC5','ENTRIES5','EXITS5','DATE6','TIME6','DESC6','ENTRIES6','EXITS6','DATE7','TIME7','DESC7','ENTRIES7','EXITS7','DATE8','TIME8','DESC8','ENTRIES8','EXITS8']
station_loc = LOCATION+'/Remote-Booth-Station.csv' # location of the Remote-Booth-Station.csv file in local machine
station_csv = pd.read_csv(station_loc)
LOCATION = 'C:/Users/Harpreet Gaur/Desktop/Turnstile_Analysis/'
def assign_station(df, station_csv):
station=[]
station_csv = station_csv.dropna()
for i in range(len(df)):
unit = df.UNIT.values[i]
ca = df['C/A'].values[i]
a = station_csv[(station_csv['Remote']==unit) & (station_csv['Booth']==ca)]
if not a.empty:
station.append(a.Station.values[0])
else:
station.append(np.NaN)
df['Station'] = station
return df
def remove_recovr_aud():
for i in range(len(files_names)):
data = pd.read_csv(LOCATION + files_names[i], names=columns)
data_fnames = assign_station(data, station_csv)
data_fname_new = data_fnames[(data_fnames['DESC1'] != 'RECOVR AUD') & (data_fnames['DESC2'] != 'RECOVR AUD') & (data_fnames['DESC3'] != 'RECOVR AUD')
& (data_fnames['DESC4'] != 'RECOVR AUD') & (data_fnames['DESC5'] != 'RECOVR AUD') & (data_fnames['DESC6'] != 'RECOVR AUD') & (data_fnames['DESC7'] != 'RECOVR AUD')
& (data_fnames['DESC8'] != 'RECOVR AUD')]
data_fname_new.to_csv(LOCATION + files_names[i])
remove_recovr_aud()
def station_list():
asd_ = pd.concat([pd.read_csv(LOCATION + files_names[i]) for i in range(len(files_names))], ignore_index=True)
asd = asd_.drop(columns='Unnamed: 0')
sum_ = asd.groupby(['Station']).UNIT.unique()
stations = list(sum_.keys())
unique = []
for i in range(len(sum_)):
unique.append(len(sum_[i]))
return stations[unique.index(max(unique))]
print('The station that has the most number of units is: ' + str(station_list()))
feb = pd.read_csv(LOCATION + 'turnstile_130202.csv')
def exits(df, day):
exits_ = []
station_ = []
stations = list(df['Station'].unique())
hours = ['0'+str(i)+':00:00' for i in range(0, 4)]
offset = []
for j in range(len(stations)):
df_temp = df[df['Station'] == stations[j]]
exits_stat = []
for i in range(2, len(df_temp)-1):
k = np.nan
for k_0 in range(1,9):
for m in range(len(hours)): # This loop is because not every 1 of Feb starts at the same hours. They start between 00:00:00 and 04:00:00, so we have to loop for this values to get the right one of the row.
if df_temp['DATE%s' % k_0].values[i] == day and df_temp['TIME%s' % k_0].values[i] == hours[m]:
k = k_0
break
if not np.isnan(k):
if k>1:
# Here we start in the first hour of Feb 1, and sum the next values until we get to another date.
kf = range(k+1, 9)
ki = range(1, k+1)
lst = [df['EXITS%s' % l][i] for l in kf if df['EXITS%s' % l][i]!=0] + [df['EXITS%s' % l][i+1] for l in ki if df['EXITS%s' % l][i+1]!=0 or df['EXITS%s' % l][i]!=np.nan]
exits_stat.append(sum(lst) - len(lst)*df['EXITS%s' % (k-1)][i]) # The '- len(lst)*df['EXITS%s' % (k-1)][i]' is to substract the cumulative part.
offset.append(df['EXITS%s' % (k-1)][i])
break
elif k == 1:
lst = [df['EXITS%s' % l][i] for l in range(1, 9) if df['EXITS%s' % l][i]!=0 or df['EXITS%s' % l][i]!=np.nan]
exits_stat.append(sum(lst) - len(lst)*df['EXITS8'][i-1])
offset.append(df['EXITS8'][i])
break
exits_.append(sum(exits_stat))
station_.append(stations[j])
exits = []
station = []
for i in range(len(exits_)):
if abs(exits_[i]) < 10000000:
exits.append(exits_[i])
station.append(station_[i])
return exits, offset, station
# The 'entries' function is the same as the 'exit' but changing EXITS%s --> ENTRIES%s
def entries(df, day):
entries_ = []
station_ = []
stations = list(df['Station'].unique())
hours = ['0'+str(i)+':00:00' for i in range(0, 4)]
offset = []
for j in range(len(stations)):
df_temp = df[df['Station'] == stations[j]]
entries_stat = []
indxs = list(df_temp.index)
for i in range(len(indxs)-1):
k = np.nan
for k_0 in range(1,9):
for m in range(len(hours)):
if df_temp.loc[indxs[i], 'DATE%s' % k_0] == day and df_temp.loc[indxs[i], 'TIME%s' % k_0] == hours[m]:
k = k_0
break
if not np.isnan(k):
if k>1:
kf = range(k+1, 9)
ki = range(1, k+1)
lst = [df_temp.loc[indxs[i], 'ENTRIES%s' % l] for l in kf if df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=0] + [df_temp.loc[indxs[i+1], 'ENTRIES%s' % l] for l in ki if df_temp.loc[indxs[i+1], 'ENTRIES%s' % l]!=0 or df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=np.nan]
entries_stat.append(sum(lst) - len(lst)*df_temp.loc[indxs[i], 'ENTRIES%s' % (k-1)])
offset.append(df_temp.loc[indxs[i], 'ENTRIES%s' % (k-1)])
break
elif k == 1:
lst = [df_temp.loc[indxs[i], 'ENTRIES%s' % l] for l in range(1, 9) if df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=0 or df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=np.nan]
entries_stat.append(sum(lst) - len(lst)*df_temp.loc[indxs[i-1], 'ENTRIES8'])
offset.append(df_temp.loc[indxs[i], 'ENTRIES8'])
break
entries_.append(sum(entries_stat))
station_.append(stations[j])
entries = []
station = []
for i in range(len(entries_)):
if abs(entries_[i]) < 10000000:
entries.append(entries_[i])
station.append(station_[i])
return entries, offset, station
entries_01, entries_31, stations_ent = entries(feb, '02-01-13')
exits_01, exits_31, stations_ex = exits(feb, '02-01-13')
print('Total number of entries for Febraury 1: ' + str(sum(entries_01)))
print('Total number of exits for Febraury 1: ' + str(sum(exits_01)))
business = [entries_01[i] + exits_01[i] for i in range(len(entries_01))]
ind = business.index(max(business))
print('The bussiest station on 1 Feb was: ' + str(stations_ent[ind]))
feb = pd.read_csv(LOCATION + 'turnstile_130202.csv')
def exits(df, day):
exits_ = []
station_ = []
stations = list(df['SCP'].unique())
hours = ['0'+str(i)+':00:00' for i in range(0, 4)]
offset = []
for j in range(len(stations)):
df_temp = df[df['SCP'] == stations[j]]
exits_stat = []
for i in range(2, len(df_temp)-1):
k = np.nan
for k_0 in range(1,9):
for m in range(len(hours)):
if df_temp['DATE%s' % k_0].values[i] == day and df_temp['TIME%s' % k_0].values[i] == hours[m]:
k = k_0
break
if not np.isnan(k):
if k>1:
kf = range(k+1, 9)
ki = range(1, k+1)
lst = [df['EXITS%s' % l][i] for l in kf if df['EXITS%s' % l][i]!=0] + [df['EXITS%s' % l][i+1] for l in ki if df['EXITS%s' % l][i+1]!=0 or df['EXITS%s' % l][i]!=np.nan]
exits_stat.append(sum(lst) - len(lst)*df['EXITS%s' % (k-1)][i])
offset.append(df['EXITS%s' % (k-1)][i])
break
elif k == 1:
lst = [df['EXITS%s' % l][i] for l in range(1, 9) if df['EXITS%s' % l][i]!=0 or df['EXITS%s' % l][i]!=np.nan]
exits_stat.append(sum(lst) - len(lst)*df['EXITS8'][i-1])
offset.append(df['EXITS8'][i])
break
exits_.append(sum(exits_stat))
station_.append(stations[j])
exits = []
station = []
for i in range(len(exits_)):
if abs(exits_[i]) < 10000000:
exits.append(exits_[i])
station.append(station_[i])
return exits, offset, station
def entries(df, day):
entries_ = []
station_ = []
stations = list(df['SCP'].unique())
hours = ['0'+str(i)+':00:00' for i in range(0, 4)]
offset = []
for j in range(len(stations)):
df_temp = df[df['SCP'] == stations[j]]
entries_stat = []
indxs = list(df_temp.index)
for i in range(len(indxs)-1):
k = np.nan
for k_0 in range(1,9):
for m in range(len(hours)):
if df_temp.loc[indxs[i], 'DATE%s' % k_0] == day and df_temp.loc[indxs[i], 'TIME%s' % k_0] == hours[m]:
k = k_0
break
if not np.isnan(k):
if k>1:
kf = range(k+1, 9)
ki = range(1, k+1)
lst = [df_temp.loc[indxs[i], 'ENTRIES%s' % l] for l in kf if df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=0] + [df_temp.loc[indxs[i+1], 'ENTRIES%s' % l] for l in ki if df_temp.loc[indxs[i+1], 'ENTRIES%s' % l]!=0 or df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=np.nan]
entries_stat.append(sum(lst) - len(lst)*df_temp.loc[indxs[i], 'ENTRIES%s' % (k-1)])
offset.append(df_temp.loc[indxs[i], 'ENTRIES%s' % (k-1)])
break
elif k == 1:
lst = [df_temp.loc[indxs[i], 'ENTRIES%s' % l] for l in range(1, 9) if df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=0 or df_temp.loc[indxs[i], 'ENTRIES%s' % l]!=np.nan]
entries_stat.append(sum(lst) - len(lst)*df_temp.loc[indxs[i-1], 'ENTRIES8'])
offset.append(df_temp.loc[indxs[i], 'ENTRIES8'])
break
entries_.append(sum(entries_stat))
station_.append(stations[j])
entries = []
station = []
for i in range(len(entries_)):
if abs(entries_[i]) < 10000000:
entries.append(entries_[i])
station.append(station_[i])
return entries, offset, station
entries_01, entries_31, stations_ent = entries(feb, '02-01-13')
exits_01, exits_31, stations_ex = exits(feb, '02-01-13')
business = [entries_01[i] + exits_01[i] for i in range(len(entries_01))]
ind = business.index(max(business))
print('The bussiest turnstile on 1 Feb was: ' + str(stations_ent[ind]))
def total_entries(data):
df = data.copy()
indexs = list(df.index)
for i in range(len(indexs)):
for k in range(1,9): # Iterate over the columns
if not np.isnan(df.loc[indexs[i], 'ENTRIES%s' % (k)]): # If the corresponding cell is not empty:
if k>1: # If it's not the first cell:
point = df.at[indexs[i], 'ENTRIES%s' % (k)] - df.at[indexs[i], 'ENTRIES%s' % (k-1)] # substract the cumulative part
if point > 0 and point < 100000: #If the substracion is nos an outlier:
df.at[indexs[i], 'ENTRIES%s' % (k)] = point # Replace the cell by the value without the cumulative part
else: # If it was an outlier, replace it by 0
df.at[indexs[i], 'ENTRIES%s' % (k)] = 0
elif k == 1 and indexs[i] == indexs[i-1]+1:
try:
point = df.at[indexs[i], 'ENTRIES1'] - df.at[indexs[i-1], 'ENTRIES8']
if point < 100000 and point > 0 :
df.at[indexs[i], 'ENTRIES1'] = point
else:
df.at[indexs[i], 'ENTRIES1'] = 0
except KeyError:
continue
return df
# The same as the funtion before, but with the exits
def total_exits(data):
df = data.copy()
indexs = list(df.index)
for i in range(len(indexs)):
for k in range(1,9):
if not np.isnan(df.loc[indexs[i], 'ENTRIES%s' % (k)]):
if k>1:
point = df.at[indexs[i], 'EXITS%s' % (k)] - df.at[indexs[i], 'EXITS%s' % (k-1)]
if point > 0 and point < 100000:
df.at[indexs[i], 'EXITS%s' % (k)] = point
else:
df.at[indexs[i], 'EXITS%s' % (k)] = 0
elif k == 1 and indexs[i] == indexs[i-1]+1 :
try:
point = df.at[indexs[i], 'EXITS1'] - df.at[indexs[i-1], 'EXITS8']
if point < 100000 and point > 0 :
df.at[indexs[i], 'EXITS1'] = point
else:
df.at[indexs[i], 'EXITS1'] = 0
except KeyError:
continue
return df
def filter_(df): # This is a filter for the outliers. My criterion was that if a value (tipically of entries or exits) was bigger than 1000000, then I would replace it by the next value.
for i in range(len(df)):
if abs(df[i]) > 1000000:
df[i] = df[i+1]
return df
def usage_station():
data_ = pd.concat([pd.read_csv(LOCATION + files_names[i] ) for i in range(len(files_names))], ignore_index=True)
data_ = data_.drop(columns='Unnamed: 0')
stations = list(data_['Station'].unique())
business = []
for j in range(len(stations)):
data = data_[data_.Station == stations[j]]
data = total_entries(data)
data = total_exits(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
suma_ex = data.groupby(['DATE1']).EXITS1.sum() + data.groupby(['DATE1']).EXITS2.sum() + data.groupby(['DATE1']).EXITS3.sum() + data.groupby(['DATE1']).EXITS4.sum() + data.groupby(['DATE1']).EXITS5.sum() + data.groupby(['DATE1']).EXITS6.sum() + data.groupby(['DATE1']).EXITS7.sum() + data.groupby(['DATE1']).EXITS8.sum()
business.append([suma_ent[i] + suma_ex[i] for i in range(len(suma_ent))])
return business, stations
business, stations = usage_station()
growth = []
decline = []
for i in range(len(business)):
if len(business[i]) > 7:
initial = np.mean(business[i][0:3])
final = np.mean(business[i][-4:-1])
if initial/final > 5 and final != 0:
growth.append(stations[i])
elif final/initial > 2 and initial != 0:
decline.append(stations[i])
print('/////')
print('The stations that have seen the most usage growth in 2013 are: ' + str(growth))
print('/////')
print('The stations that have seen the most usage decline in 2013 are: ' + str(decline))
print('/////')
def busy_days():
data = pd.concat([pd.read_csv(LOCATION + files_names[i]) for i in range(len(files_names))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = total_entries(data)
data = total_exits(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
suma_ex = data.groupby(['DATE1']).EXITS1.sum() + data.groupby(['DATE1']).EXITS2.sum() + data.groupby(['DATE1']).EXITS3.sum() + data.groupby(['DATE1']).EXITS4.sum() + data.groupby(['DATE1']).EXITS5.sum() + data.groupby(['DATE1']).EXITS6.sum() + data.groupby(['DATE1']).EXITS7.sum() + data.groupby(['DATE1']).EXITS8.sum()
business = [suma_ent[i] + suma_ex[i] for i in range(len(suma_ent))]
mean = np.mean(business)
std = np.std(business)
least_use = []
for i in range(len(business)):
if business[i] < mean - std:
least_use.append(suma_ent.keys()[i])
return print('Least busy days: ' + str(least_use))
def closed_days():
data = pd.concat([pd.read_csv(LOCATION + files_names[i]) for i in range(len(files_names))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
stations = list(data.Station.unique())
for j in range(len(stations)):
data = data[data.Station == stations[j]]
data = total_entries(data)
data = total_exits(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
suma_ex = data.groupby(['DATE1']).EXITS1.sum() + data.groupby(['DATE1']).EXITS2.sum() + data.groupby(['DATE1']).EXITS3.sum() + data.groupby(['DATE1']).EXITS4.sum() + data.groupby(['DATE1']).EXITS5.sum() + data.groupby(['DATE1']).EXITS6.sum() + data.groupby(['DATE1']).EXITS7.sum() + data.groupby(['DATE1']).EXITS8.sum()
business = [suma_ent[i] + suma_ex[i] for i in range(len(suma_ent))]
mean = np.mean(business)
least_use = []
cero_use = []
for i in range(len(business)):
if business[i] < 0.05*mean:
least_use.append(suma_ent.keys()[i])
if business[i] == 0.0:
cero_use.append(suma_ent.keys()[i])
return print('Station not operating at full capacity days: ' + str(least_use)), print('Station closed entirely days: ' + str(set(cero_use)))
print('/////')
busy_days()
print('/////')
def closed_days():
data = pd.concat([pd.read_csv(LOCATION + files_names[i]) for i in range(len(files_names))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
stations = list(data.Station.unique())
least_use = []
cero_use = []
for j in range(len(stations)):
data = data[data.Station == stations[j]]
data = total_entries(data)
data = total_exits(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
suma_ex = data.groupby(['DATE1']).EXITS1.sum() + data.groupby(['DATE1']).EXITS2.sum() + data.groupby(['DATE1']).EXITS3.sum() + data.groupby(['DATE1']).EXITS4.sum() + data.groupby(['DATE1']).EXITS5.sum() + data.groupby(['DATE1']).EXITS6.sum() + data.groupby(['DATE1']).EXITS7.sum() + data.groupby(['DATE1']).EXITS8.sum()
business = [suma_ent[i] + suma_ex[i] for i in range(len(suma_ent))]
mean = np.mean(business)
for i in range(len(business)):
if business[i] < 0.1*mean:
least_use.append(suma_ent.keys()[i])
if business[i] == 0.0:
cero_use.append(suma_ent.keys()[i])
return print('Station not operating at full capacity days: ' + str(least_use)), print('\n') , print('Station closed entirely days: ' + str(set(cero_use)))
print('/////')
closed_days()
print('/////')
plt.rcParams['figure.dpi'] = 100
Q_1 = list(reversed(['turnstile_130330.csv', 'turnstile_130323.csv' , 'turnstile_130316.csv'
,'turnstile_130309.csv' , 'turnstile_130302.csv', 'turnstile_130223.csv'
, 'turnstile_130216.csv' , 'turnstile_130209.csv' , 'turnstile_130202.csv'
, 'turnstile_130126.csv' , 'turnstile_130119.csv' , 'turnstile_130112.csv', 'turnstile_130105.csv']))
def plot_1():
data = pd.concat([pd.read_csv(LOCATION + Q_1[i] ) for i in range(len(Q_1))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = total_entries(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
plt.figure(1)
suma_ent.plot(label='Total riders')
plt.xlabel('Dates')
plt.ylabel('Counts')
plt.legend()
plt.grid()
plot_1()
Q_1 = list(reversed(['turnstile_130330.csv', 'turnstile_130323.csv' , 'turnstile_130316.csv'
,'turnstile_130309.csv' , 'turnstile_130302.csv', 'turnstile_130223.csv'
, 'turnstile_130216.csv' , 'turnstile_130209.csv' , 'turnstile_130202.csv'
, 'turnstile_130126.csv' , 'turnstile_130119.csv' , 'turnstile_130112.csv', 'turnstile_130105.csv']))
def plot_2():
data = pd.concat([pd.read_csv(LOCATION + Q_1[i]) for i in range(len(Q_1))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = total_entries(data)
data = total_exits(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
suma_ex = data.groupby(['DATE1']).EXITS1.sum() + data.groupby(['DATE1']).EXITS2.sum() + data.groupby(['DATE1']).EXITS3.sum() + data.groupby(['DATE1']).EXITS4.sum() + data.groupby(['DATE1']).EXITS5.sum() + data.groupby(['DATE1']).EXITS6.sum() + data.groupby(['DATE1']).EXITS7.sum() + data.groupby(['DATE1']).EXITS8.sum()
plt.figure(1)
suma_ent.plot(label='Total entries')
suma_ex.plot(label='Total exits')
plt.xlabel('Dates')
plt.ylabel('Counts')
plt.legend()
plt.grid()
plot_2()
Q_1_3 = list(reversed(['turnstile_130330.csv', 'turnstile_130323.csv' , 'turnstile_130316.csv'
,'turnstile_130309.csv' , 'turnstile_130302.csv']))
Q_1_2 = list(reversed(['turnstile_130223.csv'
, 'turnstile_130216.csv' , 'turnstile_130209.csv' , 'turnstile_130202.csv']))
Q_1_1 = list(reversed(['turnstile_130126.csv' , 'turnstile_130119.csv' , 'turnstile_130112.csv', 'turnstile_130105.csv',]))
plt.rcParams['figure.dpi'] = 100
def plot_3():
lst = [Q_1_1, Q_1_2, Q_1_3]
lst_str = ['Jan', 'Feb', 'March']
plt.figure(1)
for j in range(len(lst)):
data = pd.concat([pd.read_csv(LOCATION + lst[j][i], parse_dates=['DATE%s' % k for k in range(1, 9)], infer_datetime_format=True) for i in range(len(Q_1_1))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = data[data['Station'] == '34 ST-PENN STA']
data = total_entries(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
suma_ent = filter_(suma_ent)
plt.plot(j, np.mean(suma_ent.values), 'ro')
plt.errorbar(j, np.mean(suma_ent.values), yerr=np.std(suma_ent.values), ecolor='r')
plt.xticks(range(len(lst)), lst_str)
plt.xlabel('Dates')
plt.ylabel('Counts')
plt.legend(['Total entries'])
plt.grid()
plt.title('Entries')
plt.figure(2)
for j in range(len(lst)):
data = pd.concat([pd.read_csv(LOCATION + lst[j][i], parse_dates=['DATE%s' % k for k in range(1, 9)], infer_datetime_format=True) for i in range(len(Q_1_1))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = data[data['Station'] == '34 ST-PENN STA']
data = total_exits(data)
suma_ex = data.groupby(['DATE1']).EXITS1.sum() + data.groupby(['DATE1']).EXITS2.sum() + data.groupby(['DATE1']).EXITS3.sum() + data.groupby(['DATE1']).EXITS4.sum() + data.groupby(['DATE1']).EXITS5.sum() + data.groupby(['DATE1']).EXITS6.sum() + data.groupby(['DATE1']).EXITS7.sum() + data.groupby(['DATE1']).EXITS8.sum()
suma_ex = filter_(suma_ex)
plt.plot(j, np.mean(suma_ex.values), 'go')
        plt.errorbar(j, np.mean(suma_ex.values), yerr=np.std(suma_ex.values), ecolor='g')
plt.xticks(range(len(lst)), lst_str)
plt.xlabel('Dates')
plt.ylabel('Counts')
plt.legend(['Total exits'])
plt.grid()
plt.title('Exits')
plot_3()
import matplotlib.patches as mpatches
Q_1_3 = list(reversed(['turnstile_130330.csv', 'turnstile_130323.csv' , 'turnstile_130316.csv'
,'turnstile_130309.csv' , 'turnstile_130302.csv']))
Q_1_2 = list(reversed(['turnstile_130223.csv'
, 'turnstile_130216.csv' , 'turnstile_130209.csv' , 'turnstile_130202.csv']))
Q_1_1 = list(reversed(['turnstile_130126.csv' , 'turnstile_130119.csv' , 'turnstile_130112.csv', 'turnstile_130105.csv',]))
percents = [25, 50, 75]
def plot_4():
lst = [Q_1_1, Q_1_2, Q_1_3]
lst_str = ['Jan', 'Feb', 'March']
plt.figure(1)
for j in range(len(lst)):
data = pd.concat([pd.read_csv(LOCATION + lst[j][i], parse_dates=['DATE%s' % k for k in range(1, 9)], infer_datetime_format=True) for i in range(len(Q_1_1))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = data[data['Station'] == '34 ST-PENN STA']
data = total_entries(data)
data = total_exits(data)
suma_ent = data.groupby(['DATE1']).ENTRIES1.sum() + data.groupby(['DATE1']).ENTRIES2.sum() + data.groupby(['DATE1']).ENTRIES3.sum() + data.groupby(['DATE1']).ENTRIES4.sum() + data.groupby(['DATE1']).ENTRIES5.sum() + data.groupby(['DATE1']).ENTRIES6.sum() + data.groupby(['DATE1']).ENTRIES7.sum() + data.groupby(['DATE1']).ENTRIES8.sum()
suma_ent = filter_(suma_ent)
plt.plot(j, np.percentile(suma_ent.values, 25), 'ro')
plt.plot(j, np.percentile(suma_ent.values, 50), 'go')
plt.plot(j, np.percentile(suma_ent.values, 75), 'bo')
plt.xticks(range(len(lst)), lst_str)
plt.xlabel('Dates')
plt.ylabel('Percentiles')
red_patch = mpatches.Patch(color='red', label='Total entries - 25')
green_patch = mpatches.Patch(color='green', label='Total entries - 50')
blue_patch = mpatches.Patch(color='blue', label='Total entries - 75')
plt.legend(handles=[red_patch, green_patch, blue_patch])
plt.title('Entries')
plt.grid()
plt.figure(2)
for j in range(len(lst)):
data = pd.concat([pd.read_csv(LOCATION + lst[j][i], parse_dates=['DATE%s' % k for k in range(1, 9)], infer_datetime_format=True) for i in range(len(Q_1_1))], ignore_index=True)
data = data.drop(columns='Unnamed: 0')
data = data[data['Station'] == '34 ST-PENN STA']
data = total_entries(data)
data = total_exits(data)
suma_ex = data.groupby(['DATE1']).EXITS1.sum() + data.groupby(['DATE1']).EXITS2.sum() + data.groupby(['DATE1']).EXITS3.sum() + data.groupby(['DATE1']).EXITS4.sum() + data.groupby(['DATE1']).EXITS5.sum() + data.groupby(['DATE1']).EXITS6.sum() + data.groupby(['DATE1']).EXITS7.sum() + data.groupby(['DATE1']).EXITS8.sum()
suma_ex = filter_(suma_ex)
plt.plot(j, np.percentile(suma_ex.values, 25), 'ro')
plt.plot(j, np.percentile(suma_ex.values, 50), 'go')
plt.plot(j, np.percentile(suma_ex.values, 75), 'bo')
plt.xticks(range(len(lst)), lst_str)
plt.xlabel('Dates')
plt.ylabel('Percentiles')
        red_patch = mpatches.Patch(color='red', label='Total exits - 25')
        green_patch = mpatches.Patch(color='green', label='Total exits - 50')
        blue_patch = mpatches.Patch(color='blue', label='Total exits - 75')
plt.legend(handles=[red_patch, green_patch, blue_patch])
plt.title('Exits')
plt.grid()
plot_4()
| 0.104044 | 0.26736 |
<a href="https://colab.research.google.com/github/ezzatmostafa96/Action-Recognition-in-Real-Time/blob/master/19_5_tsn_r50.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
! cd Monk_Object_Detection/18_mmaction/installation && cat requirements.txt | xargs -n 1 -L 1 pip install -vvvv
! pip install pillow==6.0.0
# Restart runtime
! cd Monk_Object_Detection/18_mmaction/lib/ && pip install -vvvv -e .
! pip uninstall -y mmcv
! cd Monk_Object_Detection/18_mmaction/lib/mmcv && MMCV_WITH_OPS=1 pip install -vvvv -e .
# Restart runtime
! mkdir Dataset
! mkdir Dataset/Videos
!mkdir Anomaly
from distutils.dir_util import copy_tree
# copy subdirectory example
fromDirectory = "/content/drive/MyDrive/Anomaly-Videos-Part-1/Arrest"
toDirectory = "/content/Anomaly"
copy_tree(fromDirectory, toDirectory)
fromDirectory = "/content/drive/MyDrive/Anomaly-Videos-Part-1/Assault"
toDirectory = "/content/Anomaly"
copy_tree(fromDirectory, toDirectory)
fromDirectory = "/content/drive/MyDrive/Anomaly-Videos-Part-2/Burglary"
toDirectory = "/content/Anomaly"
copy_tree(fromDirectory, toDirectory)
!mkdir Normal
fromDirectory = "/content/drive/MyDrive/Training-Normal-Videos-Part-1"
toDirectory = "/content/Normal"
copy_tree(fromDirectory, toDirectory)
fromDirectory = "/content/drive/MyDrive/Anomaly-Videos-Part-3/Robbery"
toDirectory = "/content/Anomaly"
copy_tree(fromDirectory, toDirectory)
import os
from random import sample
files = os.listdir('/content/Normal')
for file in sample(files,63):
os.remove('/content/Normal/' + file)
print("Class Anomaly: {}".format(len(os.listdir("/content/Anomaly"))))
print("Class Normal: {}".format(len(os.listdir("/content/Normal"))))
classes = ["Anomaly", "Normal"]
f = open("Dataset/classes.txt", 'w');
for i in range(len(classes)):
f.write(classes[i] + "\n");
f.close();
import os
from tqdm import tqdm
combined = [];
!mkdir All
!mv /content/Anomaly /content/All
!mv /content/Normal /content/All
folder_name = "/content/All/"
class_name = "Anomaly";
folder_name += class_name + "/";
vid_list = os.listdir(folder_name);
for i in tqdm(range(len(vid_list))):
os.system("cp " + folder_name + vid_list[i] + " Dataset/Videos/")
wr = vid_list[i] + " " + str(classes.index(class_name))
combined.append(wr)
folder_name = "/content/All/"
class_name = "Normal";
folder_name += class_name + "/";
vid_list = os.listdir(folder_name);
for i in tqdm(range(len(vid_list))):
os.system("cp " + folder_name + vid_list[i] + " Dataset/Videos/")
wr = vid_list[i] + " " + str(classes.index(class_name))
combined.append(wr)
import random
random.shuffle(combined)
train_list = combined[:100];
val_list = combined[100:];
f = open("Dataset/train.txt", 'w')
for i in range(len(train_list)):
f.write(train_list[i] + "\n");
f.close();
f = open("Dataset/val.txt", 'w')
for i in range(len(val_list)):
f.write(val_list[i] + "\n");
f.close();
import os
import sys
sys.path.append("Monk_Object_Detection/18_mmaction/lib");
from train_engine import Detector_Videos
gtf = Detector_Videos();
video_dir = 'Dataset/Videos/';
anno_file = 'Dataset/train.txt';
classes_list_file = 'Dataset/classes.txt';
gtf.Train_Video_Dataset(video_dir, anno_file, classes_list_file);
video_dir = 'Dataset/Videos/';
anno_file = 'Dataset/val.txt';
gtf.Val_Video_Dataset(video_dir, anno_file);
gtf.Dataset_Params(batch_size=8, num_workers=4)
gtf.List_Models();
gtf.Model_Params(model_name="tsn_r50", gpu_devices=[0])
gtf.Hyper_Params(lr=0.001, momentum=0.9, weight_decay=0.0001)
gtf.Training_Params(num_epochs=100, val_interval=10)
gtf.Train();
from infer_engine import Infer_Videos
import os
import sys
sys.path.append("Monk_Object_Detection/18_mmaction/lib");
gtf = Infer_Videos();
gtf.Dataset_Params('/content/Dataset/classes.txt');
config_file = "/content/work_dirs/config.py"
checkpoint_file = "/content/work_dirs/latest.pth"
gtf.Model_Params(config_file, checkpoint_file, use_gpu=True)
!wget https://www.dropbox.com/sh/75v5ehq4cdg5g5g/AABqY-3fJSmSMafFIlJXRE-9a/Anomaly-Videos-Part-4.zip?dl=0
!unzip /content/Anomaly-Videos-Part-4.zip?dl=0
!rm -r /content/Anomaly-Videos-Part-4.zip?dl=0
import os
for filename in os.listdir('/content/Anomaly-Videos-Part-4/Shoplifting'):
    r = gtf.Predict('/content/Anomaly-Videos-Part-4/Shoplifting/'+filename)[0][0]
    print(r[0])
print('--------------------------------------------------------')
import os
for filename in os.listdir('/content/Anomaly-Videos-Part-4/Stealing'):
    r = gtf.Predict('/content/Anomaly-Videos-Part-4/Stealing/'+filename)[0][0]
    print(r[0])
print('--------------------------------------------------------')
import os
for filename in os.listdir('/content/Anomaly-Videos-Part-4/Vandalism'):
    r = gtf.Predict('/content/Anomaly-Videos-Part-4/Vandalism/'+filename)[0][0]
    print(r[0])
print('--------------------------------------------------------')
import os
for filename in os.listdir('/content/drive/MyDrive/Training-Normal-Videos-Part-1'):
    r = gtf.Predict('/content/drive/MyDrive/Training-Normal-Videos-Part-1/'+filename)[0][0]
    print(r[0])
print('--------------------------------------------------------')
video_path = '/content/Anomaly-Videos-Part-4/Shoplifting/Shoplifting001_x264.mp4'
results = gtf.Predict(video_path)
results
results[0][0]
import os
for filename in os.listdir('/content/Anomaly-Videos-Part-4/Vandalism'):
r = gtf.Predict('/content/Anomaly-Videos-Part-4/Vandalism/'+filename)[0][0]
    print(r[0])
print('--------------------------------------------------------')
```
|
github_jupyter
|
! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git
! cd Monk_Object_Detection/18_mmaction/installation && cat requirements.txt | xargs -n 1 -L 1 pip install -vvvv
! pip install pillow==6.0.0
# Restart runtime
! cd Monk_Object_Detection/18_mmaction/lib/ && pip install -vvvv -e .
! pip uninstall -y mmcv
! cd Monk_Object_Detection/18_mmaction/lib/mmcv && MMCV_WITH_OPS=1 pip install -vvvv -e .
# Restart runtime
! mkdir Dataset
! mkdir Dataset/Videos
!mkdir Anomaly
from distutils.dir_util import copy_tree
# copy subdirectory example
fromDirectory = "/content/drive/MyDrive/Anomaly-Videos-Part-1/Arrest"
toDirectory = "/content/Anomaly"
copy_tree(fromDirectory, toDirectory)
fromDirectory = "/content/drive/MyDrive/Anomaly-Videos-Part-1/Assault"
toDirectory = "/content/Anomaly"
copy_tree(fromDirectory, toDirectory)
fromDirectory = "/content/drive/MyDrive/Anomaly-Videos-Part-2/Burglary"
toDirectory = "/content/Anomaly"
copy_tree(fromDirectory, toDirectory)
!mkdir Normal
fromDirectory = "/content/drive/MyDrive/Training-Normal-Videos-Part-1"
toDirectory = "/content/Normal"
copy_tree(fromDirectory, toDirectory)
fromDirectory = "/content/drive/MyDrive/Anomaly-Videos-Part-3/Robbery"
toDirectory = "/content/Anomaly"
copy_tree(fromDirectory, toDirectory)
import os
from random import sample
files = os.listdir('/content/Normal')
for file in sample(files,63):
os.remove('/content/Normal/' + file)
print("Class Anomaly: {}".format(len(os.listdir("/content/Anomaly"))))
print("Class Normal: {}".format(len(os.listdir("/content/Normal"))))
classes = ["Anomaly", "Normal"]
f = open("Dataset/classes.txt", 'w');
for i in range(len(classes)):
f.write(classes[i] + "\n");
f.close();
import os
from tqdm import tqdm
combined = [];
!mkdir All
!mv /content/Anomaly /content/All
!mv /content/Normal /content/All
folder_name = "/content/All/"
class_name = "Anomaly";
folder_name += class_name + "/";
vid_list = os.listdir(folder_name);
for i in tqdm(range(len(vid_list))):
os.system("cp " + folder_name + vid_list[i] + " Dataset/Videos/")
wr = vid_list[i] + " " + str(classes.index(class_name))
combined.append(wr)
folder_name = "/content/All/"
class_name = "Normal";
folder_name += class_name + "/";
vid_list = os.listdir(folder_name);
for i in tqdm(range(len(vid_list))):
os.system("cp " + folder_name + vid_list[i] + " Dataset/Videos/")
wr = vid_list[i] + " " + str(classes.index(class_name))
combined.append(wr)
import random
random.shuffle(combined)
train_list = combined[:100];
val_list = combined[100:];
f = open("Dataset/train.txt", 'w')
for i in range(len(train_list)):
f.write(train_list[i] + "\n");
f.close();
f = open("Dataset/val.txt", 'w')
for i in range(len(val_list)):
f.write(val_list[i] + "\n");
f.close();
import os
import sys
sys.path.append("Monk_Object_Detection/18_mmaction/lib");
from train_engine import Detector_Videos
gtf = Detector_Videos();
video_dir = 'Dataset/Videos/';
anno_file = 'Dataset/train.txt';
classes_list_file = 'Dataset/classes.txt';
gtf.Train_Video_Dataset(video_dir, anno_file, classes_list_file);
video_dir = 'Dataset/Videos/';
anno_file = 'Dataset/val.txt';
gtf.Val_Video_Dataset(video_dir, anno_file);
gtf.Dataset_Params(batch_size=8, num_workers=4)
gtf.List_Models();
gtf.Model_Params(model_name="tsn_r50", gpu_devices=[0])
gtf.Hyper_Params(lr=0.001, momentum=0.9, weight_decay=0.0001)
gtf.Training_Params(num_epochs=100, val_interval=10)
gtf.Train();
from infer_engine import Infer_Videos
import os
import sys
sys.path.append("Monk_Object_Detection/18_mmaction/lib");
gtf = Infer_Videos();
gtf.Dataset_Params('/content/Dataset/classes.txt');
config_file = "/content/work_dirs/config.py"
checkpoint_file = "/content/work_dirs/latest.pth"
gtf.Model_Params(config_file, checkpoint_file, use_gpu=True)
!wget https://www.dropbox.com/sh/75v5ehq4cdg5g5g/AABqY-3fJSmSMafFIlJXRE-9a/Anomaly-Videos-Part-4.zip?dl=0
!unzip /content/Anomaly-Videos-Part-4.zip?dl=0
!rm -r /content/Anomaly-Videos-Part-4.zip?dl=0
import os
for filename in os.listdir('/content/Anomaly-Videos-Part-4/Shoplifting'):
    r = gtf.Predict('/content/Anomaly-Videos-Part-4/Shoplifting/'+filename)[0][0]
    print(r[0])
print('--------------------------------------------------------')
import os
for filename in os.listdir('/content/Anomaly-Videos-Part-4/Stealing'):
    r = gtf.Predict('/content/Anomaly-Videos-Part-4/Stealing/'+filename)[0][0]
    print(r[0])
print('--------------------------------------------------------')
import os
for filename in os.listdir('/content/Anomaly-Videos-Part-4/Vandalism'):
    r = gtf.Predict('/content/Anomaly-Videos-Part-4/Vandalism/'+filename)[0][0]
    print(r[0])
print('--------------------------------------------------------')
import os
for filename in os.listdir('/content/drive/MyDrive/Training-Normal-Videos-Part-1'):
    r = gtf.Predict('/content/drive/MyDrive/Training-Normal-Videos-Part-1/'+filename)[0][0]
    print(r[0])
print('--------------------------------------------------------')
video_path = '/content/Anomaly-Videos-Part-4/Shoplifting/Shoplifting001_x264.mp4'
results = gtf.Predict(video_path)
results
results[0][0]
import os
for filename in os.listdir('/content/Anomaly-Videos-Part-4/Vandalism'):
r = gtf.Predict('/content/Anomaly-Videos-Part-4/Vandalism/'+filename)[0][0]
    print(r[0])
print('--------------------------------------------------------')
| 0.133232 | 0.553928 |
[View in Colaboratory](https://colab.research.google.com/github/davidkant/mai/blob/master/tutorial/4_2_Markov_Chains.ipynb)
# 4.2 Markov Chains
So how can we do better? Remember, music is a temporal art, and often the order in which things happen is meaningful. The answer: ***conditional probability.*** Conditional probability quantifies the likelihood that an outcome will occur *given that another outcome has already occurred*. Conditional probability expresses the concept of contingency and is used to describe random processes that depend on something. With music, we often use conditional probability to express the relationships between the future and the past.
A ***Markov chain*** is a probability system in which the likelihood of *future* outcomes is determined by *past* outcomes. It's a tool for working with conditional probability, and, in this notebook, we'll use Markov chains to model sequences of musical events. Markov chains have two important features: the ***order***, which determines how far back past events affect future likelihoods, and the ***transition matrix***, which is a data structure that represents the system of conditional probabilities.
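To make the ***order*** concrete, here is a tiny plain-Python sketch (for intuition only; it is independent of the `mai` package used later in this notebook): the state is a tuple of the previous `order` values, and the transition table simply counts which value followed each state.

```
# Sketch: count first-order transitions by hand (illustration only)
from collections import defaultdict

def count_transitions(seq, order=1):
    table = defaultdict(int)
    for i in range(order, len(seq)):
        state = tuple(seq[i-order:i])   # the previous `order` values
        table[(state, seq[i])] += 1     # the value that followed them
    return dict(table)

count_transitions([60, 62, 64, 65, 67], order=1)
# -> {((60,), 62): 1, ((62,), 64): 1, ((64,), 65): 1, ((65,), 67): 1}
```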
Note: Markov chains are implemented in the `mai` package, so this notebook is mostly about how to use it.
## Setup
```
# install external libraries
!pip install -q git+https://github.com/davidkant/mai#egg=mai;
!pip install -q pretty_midi
!pip install -q pyfluidsynth
!apt-get -qq update
!apt-get -qq install -y libfluidsynth1
# imports
import mai
import random
import matplotlib.pyplot as plt
```
## Learn a transition table from data
We are going to learn a markov chain transition matrix from an example sequence. Our first example is a short sequence of numbers:
```
# make some dummy music
music = [60, 62, 64, 65, 67]
```
First, we have to create a new markov chain to keep track of our data — this is the variable `mark`. Then we learn the transition table by calling the function `train` and passing our example musical sequence `music` as an argument.
```
# create a new markov chain
mark = mai.markov.Markov()
# learn a new table from data
mark.train(music, order=1)
```
View the transition table --- is this what you expected? The transition table counts the number of times we transition from each state to the next. For each entry of the table, the transition is to the left of the colon and the count is to the right. For instance, `((60,), 62): 1,` means the sequence transitions from the state `60` to the state `62` once.
```
# view the transition table
mark.transitions
```
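The counts can be turned into conditional probabilities by normalizing within each state, which is conceptually what the chain does when it picks its next value. A quick sketch (this helper is not part of the `mai` API):

```
# Sketch: convert transition counts into conditional probabilities
from collections import defaultdict

def to_probabilities(transitions):
    totals = defaultdict(int)
    for (state, nxt), count in transitions.items():
        totals[state] += count
    return {(state, nxt): count / totals[state]
            for (state, nxt), count in transitions.items()}

to_probabilities(mark.transitions)
```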
## Generate a new musical sequence from the trained Markov model
This is the fun part. Once we have learned a Markov transition matrix, we can generate new sequences from it! The first thing we must do is set an initial state, otherwise the Markov chain wouldn't know where to start, then call the function `choose` to choose the next state according to the transition table.
```
# set initial state
mark.state = (60,)
# next choice
mark.choose()
```
You can continue to call choose as many times as you wish... until you get an error!!! NOTE: in the following cell we omit the line of code that sets the initial state because we do not want to reset our initial state each time.
```
# next choice
mark.choose()
```
If you call `choose` too many times on this particular transition table you eventually get the error: `LookupError: Current state not found in transition table`. The problem is we eventually transition *to* the state `67` but there is no entry in the transition table that tells us where to go *from* state `67`. This is because state `67` is the last number in the sequence that we learned from.
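If you want to keep experimenting without hitting that error, one defensive pattern (a sketch only, not part of the `mai` package) is to fall back to a known initial state whenever the current state was never seen during training:

```
# Sketch: retry from a fallback state when the current state is unknown
def safe_choose(markov, fallback_state=(60,)):
    try:
        return markov.choose()
    except LookupError:
        markov.state = fallback_state
        return markov.choose()
```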
## Now let's try a few other sequences
### Example Sequence #2
What's different about this sequence? Here we add one additional value to our training sequence in order to avoid the error above. We create a loop where the sequence begins and ends on the same value, `60`.
```
# sequence to train on
music = [60, 62, 64, 65, 67, 60]
# create a new markov chain
mark = mai.markov.Markov()
# learn a new table from data
mark.train(music, order=1)
# view the transition table
mark.transitions
```
Set the initial state
```
# set initial state
mark.state = (60,)
```
Now choose repeatedly. You should **not** get an error this time. What happens instead?
```
# next choice
mark.choose()
```
Generate a sequence of 12 choices and plot it. The original sequence is in blue and the new sequence is in green. How do they compare? The new sequence is an exact copy of the original just repeated.
```
# plot original and new
plt.figure(figsize=(8,3))
plt.plot(music)
plt.plot([mark.choose() for x in range(12)])
plt.show()
```
### Example Sequence #3
Example sequence #2 loops the same sequence over and over and over again. It is deterministic because there is only one option for each previous state to transition to. How would you change the training sequence such that there are multiple options for at least one state?
```
# training sequence that is not deterministic
music = [60, 62, 64, 65, 67, 60, 67]
# create a new markov chain
mark = mai.markov.Markov()
# learn a new table from data
mark.train(music, order=1)
# view the transition table
mark.transitions
# set initial state
mark.state = (60,)
# next choice
mark.choose()
```
Generate a new sequence of 12 choices and plot it. The original sequence is in blue and the new sequence is in green. How do they compare? The new sequence is not (necessarily) an exact copy of the original, but more like cut up and rearranged segments of the original.
```
# plot original and new
plt.figure(figsize=(8,3))
plt.plot(music)
plt.plot([mark.choose() for x in range(12)])
plt.show()
```
Hopefully these simple sequences are helping your intuition develop, but let's move on to music now...
|
github_jupyter
|
# install external libraries
!pip install -q git+https://github.com/davidkant/mai#egg=mai;
!pip install -q pretty_midi
!pip install -q pyfluidsynth
!apt-get -qq update
!apt-get -qq install -y libfluidsynth1
# imports
import mai
import random
import matplotlib.pyplot as plt
# make some dummy music
music = [60, 62, 64, 65, 67]
# create a new markov chain
mark = mai.markov.Markov()
# learn a new table from data
mark.train(music, order=1)
# view the transition table
mark.transitions
# set initial state
mark.state = (60,)
# next choice
mark.choose()
# next choice
mark.choose()
# sequence to train on
music = [60, 62, 64, 65, 67, 60]
# create a new markov chain
mark = mai.markov.Markov()
# learn a new table from data
mark.train(music, order=1)
# view the transition table
mark.transitions
# set initial state
mark.state = (60,)
# next choice
mark.choose()
# plot original and new
plt.figure(figsize=(8,3))
plt.plot(music)
plt.plot([mark.choose() for x in range(12)])
plt.show()
# training sequence that is not deterministic
music = [60, 62, 64, 65, 67, 60, 67]
# create a new markov chain
mark = mai.markov.Markov()
# learn a new table from data
mark.train(music, order=1)
# view the transition table
mark.transitions
# set initial state
mark.state = (60,)
# next choice
mark.choose()
# plot original and new
plt.figure(figsize=(8,3))
plt.plot(music)
plt.plot([mark.choose() for x in range(12)])
plt.show()
| 0.490236 | 0.989013 |
# Gaia DR2 variability catalogs
### Part I: What's in them?
gully
May 2, 2018
```
# %load /Users/obsidian/Desktop/defaults.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
! du -hs ../data/dr2/Gaia/gdr2/vari_classifier_result/csv
df0 = pd.read_csv('../data/dr2/Gaia/gdr2/vari_classifier_result/csv/VariClassifierResult_0.csv.gz')
df0.shape
df0.head()
```
The catalog is not too long. We can just read in all the files and concatenate them
```
import glob
fns = glob.glob('../data/dr2/Gaia/gdr2/vari_classifier_result/csv/VariClassifierResult_*.csv.gz')
n_files = len(fns)
df_classifier = pd.DataFrame()
```
This step only takes 1 second:
```
for i, fn in enumerate(fns):
df_i = pd.read_csv(fn)
df_classifier = df_classifier.append(df_i, ignore_index=True)
df_classifier.shape
```
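An equivalent single call gives the same result and avoids `DataFrame.append`, which is deprecated in recent pandas releases (a minimal alternative sketch):

```
# Alternative: concatenate all files in one call
df_classifier = pd.concat((pd.read_csv(fn) for fn in fns), ignore_index=True)
df_classifier.shape
```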
Ok, we have 363,969 classifications of variable stars, which matches exactly with the number presented in Table 1 of [Brown et al. 2018](https://www.aanda.org/component/article?access=doi&doi=10.1051/0004-6361/201833051). What are the categories?
```
df_classifier.best_class_name.value_counts()
```
Section 7.3.3 of the [Gaia DR2 Documentation](http://gea.esac.esa.int/archive/documentation/GDR2/Data_analysis/chap_cu7var/ssec_cu7var_sos_allsky/ssec_cu7var_allsky_proc.html) lists the classification code definition.
> The training set included objects of the classes targeted for publication in Gaia DR2 (listed in bold) as well as other types to reduce the contamination of the published classification results. The full list of object classes, with labels (used in the rest of this section) and corresponding descriptions, follows below.
20. **RRAB**: Fundamental-mode RR Lyrae stars.
16. **MIRA**: Long period variable stars of the o (omicron) Ceti type (Mira).
28. **SR**: Long period variable stars of the semiregular type.
21. **RRC**: First-overtone RR Lyrae stars.
10. **DSCT**: δ Scuti-type stars.
27. **SXPHE**: SX Phoenicis-type stars.
7. **CEP**: Classical (δ) Cepheids.
29. **T2CEP**: Type-II Cepheids.
22. **RRD**: Double-mode RR Lyrae stars.
1. **ACEP**: Anomalous Cepheids.
4. **ARRD**: Anomalous double-mode RR Lyrae stars.
The stars I'm interested in are not automatically classified, but are used in *training* the classifier that labels these stars. That's too bad, I'd like to see the lightcurves for these classes:
2. ACV: α2 Canum Venaticorum-type stars.
8. CONSTANT: Objects whose variations (or absence thereof) are consistent with those of constant sources (Section 7.2.3).
11. ECL: Eclipsing binary stars.
13. FLARES: Magnetically active stars displaying flares.
19. ROT: Rotation modulation in solar-like stars due to magnetic activity (spots).
23. RS: RS Canum Venaticorum-type stars.
24. SOLARLIKE: Stars with solar-like variability induced by magnetic activity (flares, spots, and rotational modulation).
Oh well, it looks like these desired classifications may elude us for now. What's the deal with the classifier name column? Section 7.3.1 of the Documentation explains:
> The results of this classification can be found in the Gaia DR2 archive in the classification table associated with the `nTransits:2+` classifier, although subsequent filtering [...] increased the minimum number of FoV transits to five
```
df_classifier.classifier_name.value_counts()
```
They all have the same entry, so let's drop this column.
```
df_classifier.drop(columns='classifier_name', inplace=True)
```
What is the distribution of best class scores?
```
df_classifier.best_class_score.hist(bins=20)
plt.xlim(1, 0)
plt.xlabel('Best Class Score')
plt.ylabel('$N$')
```
Many sources have best class scores close to 1. How do the classifications break down by Class?
```
df_summary = df_classifier.groupby('best_class_name').best_class_score.describe()
df_summary.style.format({key: "{:.0%}" for key in ['mean', 'std', 'min', '25%', '50%', '75%', 'max']})
```
Comparing the classification scores between types might not be trivial, depending on what assumptions are made in the classifier (the prior probabilities vary drastically). For example, if you made a classifier that always guessed `MIRA_SR`, you would be right 41% of the time. Finding anomalous Cepheids is a needle-in-a-haystack problem.
```
df_summary['parent_fraction'] = df_summary['count']/df_summary['count'].sum()
df_summary.style.format({key: "{:.0%}" for key in ['mean', 'std', 'min', '25%', '50%', '75%', 'max']}
).format({'parent_fraction': "{:.2%}"})
```
The next thing we could do is match the Gaia `Source ID` with the main catalog to get RA and DEC values to then cross-match with Kepler/K2.
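As a rough sketch of that last step: assuming we have pulled `ra`/`dec` (in degrees) for our sources from the Gaia source table and have a K2 target list with the same columns (both are assumptions here, not files used above), an on-sky cross-match with astropy could look like this:

```
from astropy.coordinates import SkyCoord
import astropy.units as u

def crossmatch(df_gaia_pos, df_k2_pos, max_sep_arcsec=2.0):
    """Match each Gaia source to the nearest K2 target within max_sep_arcsec."""
    gaia = SkyCoord(ra=df_gaia_pos['ra'].values * u.deg, dec=df_gaia_pos['dec'].values * u.deg)
    k2 = SkyCoord(ra=df_k2_pos['ra'].values * u.deg, dec=df_k2_pos['dec'].values * u.deg)
    idx, d2d, _ = gaia.match_to_catalog_sky(k2)
    return idx, d2d < max_sep_arcsec * u.arcsec
```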
|
github_jupyter
|
# %load /Users/obsidian/Desktop/defaults.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
! du -hs ../data/dr2/Gaia/gdr2/vari_classifier_result/csv
df0 = pd.read_csv('../data/dr2/Gaia/gdr2/vari_classifier_result/csv/VariClassifierResult_0.csv.gz')
df0.shape
df0.head()
import glob
fns = glob.glob('../data/dr2/Gaia/gdr2/vari_classifier_result/csv/VariClassifierResult_*.csv.gz')
n_files = len(fns)
df_classifier = pd.DataFrame()
for i, fn in enumerate(fns):
df_i = pd.read_csv(fn)
df_classifier = df_classifier.append(df_i, ignore_index=True)
df_classifier.shape
df_classifier.best_class_name.value_counts()
df_classifier.classifier_name.value_counts()
df_classifier.drop(columns='classifier_name', inplace=True)
df_classifier.best_class_score.hist(bins=20)
plt.xlim(1, 0)
plt.xlabel('Best Class Score')
plt.ylabel('$N$')
df_summary = df_classifier.groupby('best_class_name').best_class_score.describe()
df_summary.style.format({key: "{:.0%}" for key in ['mean', 'std', 'min', '25%', '50%', '75%', 'max']})
df_summary['parent_fraction'] = df_summary['count']/df_summary['count'].sum()
df_summary.style.format({key: "{:.0%}" for key in ['mean', 'std', 'min', '25%', '50%', '75%', 'max']}
).format({'parent_fraction': "{:.2%}"})
| 0.276202 | 0.892609 |
```
import lightgbm as lgb
import sklearn
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import log_loss,confusion_matrix,classification_report,roc_curve,auc,accuracy_score,roc_auc_score
from sklearn.model_selection import StratifiedKFold
# Onehot encoding
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from scipy import sparse
import gc
import os
import random
def seed_everything(seed: int = 42):
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
#torch.manual_seed(seed)
#torch.cuda.manual_seed(seed)
#torch.backends.cudnn.deterministic = True
#torch.backends.cudnn.benchmark = False
seed_everything()
'''
!mkdir input/
%cd input/
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/00359/NewsAggregatorDataset.zip
!unzip NewsAggregatorDataset.zip
'''
# Replace double quotes with single quotes to avoid errors when reading the file
#!sed -e 's/"/'\''/g' ./input/newsCorpora.csv > ./input/newsCorpora_re.csv
#%cd ..
# Load the data
df = pd.read_csv('./input/newsCorpora_re.csv', header=None, sep='\t', names=['ID', 'TITLE', 'URL', 'PUBLISHER', 'CATEGORY', 'STORY', 'HOSTNAME', 'TIMESTAMP'])
# Keep only the target publishers and columns
df = df.loc[df['PUBLISHER'].isin(['Reuters', 'Huffington Post', 'Businessweek', 'Contactmusic.com', 'Daily Mail']), ['TITLE', 'CATEGORY']]
df.head()
# Split the data
df_train, df_valid_test = train_test_split(df, test_size=0.2, shuffle=True, random_state=123, stratify=df['CATEGORY'])
df_valid, df_test = train_test_split(df_valid_test, test_size=0.5, shuffle=True, random_state=123, stratify=df_valid_test['CATEGORY'])
df_train.reset_index(drop=True, inplace=True)
df_valid.reset_index(drop=True, inplace=True)
df_test.reset_index(drop=True, inplace=True)
print(df_train.head())
vect_word = TfidfVectorizer(max_features=20000, lowercase=True, analyzer='word',
stop_words= None,ngram_range=(1,3),dtype=np.float32)
vect_char = TfidfVectorizer(max_features=40000, lowercase=True, analyzer='char',
stop_words=None,ngram_range=(3,6),dtype=np.float32)
# Word ngram vector
tr_vect = vect_word.fit_transform(df_train['TITLE'])
vl_vect = vect_word.transform(df_valid['TITLE'])
ts_vect = vect_word.transform(df_test['TITLE'])
# Character n gram vector
tr_vect_char = vect_char.fit_transform(df_train['TITLE'])
vl_vect_char = vect_char.transform(df_valid['TITLE'])
ts_vect_char = vect_char.transform(df_test['TITLE'])
gc.collect()
tr_vect.shape
X = sparse.hstack([tr_vect, tr_vect_char])
x_val = sparse.hstack([vl_vect, vl_vect_char])
x_test = sparse.hstack([ts_vect, ts_vect_char])
le = LabelEncoder()
y_tr = le.fit_transform(df_train['CATEGORY'].values)
y_vl = le.transform(df_valid['CATEGORY'].values)
y_te = le.transform(df_test['CATEGORY'].values)
svd = TruncatedSVD(n_components=300, random_state=42)
X = svd.fit_transform(tr_vect)
x_val = svd.transform(vl_vect)
x_test = svd.transform(ts_vect)
X.shape
y_vl.shape
y_tr.shape
model = lgb.LGBMClassifier()
model.fit(X, y_tr)
# Predict on the validation data
y_pred = model.predict_proba(x_val)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_vl == y_pred_max) / len(y_vl)
print(accuracy)
print(roc_auc_score(y_vl, y_pred, multi_class='ovo'))
print(log_loss(y_vl, y_pred))
# Predict on the test data
y_pred = model.predict_proba(x_test)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_te == y_pred_max) / len(y_te)
print(accuracy)
print(roc_auc_score(y_te, y_pred, multi_class='ovo'))
print(log_loss(y_te, y_pred))
```
# Gensim
```
!pip freeze > requirements.lock
import gensim.downloader as api
wv = api.load('word2vec-google-news-300')
class SWEM():
"""
Simple Word-Embeddingbased Models (SWEM)
https://arxiv.org/abs/1805.09843v1
"""
def __init__(self, w2v, tokenizer, oov_initialize_range=(-0.01, 0.01)):
self.w2v = w2v
self.tokenizer = tokenizer
self.vocab = set(self.w2v.vocab.keys())
self.embedding_dim = self.w2v.vector_size
self.oov_initialize_range = oov_initialize_range
if self.oov_initialize_range[0] > self.oov_initialize_range[1]:
raise ValueError("Specify valid initialize range: "
f"[{self.oov_initialize_range[0]}, {self.oov_initialize_range[1]}]")
def get_word_embeddings(self, text):
np.random.seed(abs(hash(text)) % (10 ** 8))
vectors = []
for word in self.tokenizer(text):
if word in self.vocab:
vectors.append(self.w2v[word])
else:
vectors.append(np.random.uniform(self.oov_initialize_range[0],
self.oov_initialize_range[1],
self.embedding_dim))
return np.array(vectors)
def average_pooling(self, text):
word_embeddings = self.get_word_embeddings(text)
return np.mean(word_embeddings, axis=0)
def max_pooling(self, text):
word_embeddings = self.get_word_embeddings(text)
return np.max(word_embeddings, axis=0)
def concat_average_max_pooling(self, text):
word_embeddings = self.get_word_embeddings(text)
return np.r_[np.mean(word_embeddings, axis=0), np.max(word_embeddings, axis=0)]
def hierarchical_pooling(self, text, n):
word_embeddings = self.get_word_embeddings(text)
text_len = word_embeddings.shape[0]
if n > text_len:
raise ValueError(f"window size must be less than text length / window_size:{n} text_length:{text_len}")
window_average_pooling_vec = [np.mean(word_embeddings[i:i + n], axis=0) for i in range(text_len - n + 1)]
return np.max(window_average_pooling_vec, axis=0)
from gensim.models import KeyedVectors
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
nlp = English()
swem = SWEM(wv, nlp.tokenizer)
# Word ngram vector
tr_vect = np.array([swem.average_pooling(text) for text in df_train['TITLE'].tolist()])
vl_vect = np.array([swem.average_pooling(text) for text in df_valid['TITLE'].tolist()])
ts_vect = np.array([swem.average_pooling(text) for text in df_test['TITLE'].tolist()])
tr_vect.shape
y_tr.shape
pd.Series(y_tr).value_counts()
model = lgb.LGBMClassifier()
model.fit(tr_vect, y_tr)
# Predict on the validation data
y_pred = model.predict_proba(vl_vect)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_vl == y_pred_max) / len(y_vl)
print('{:.4f}'.format(accuracy))
print('{:.4f}'.format(roc_auc_score(y_vl, y_pred, multi_class='ovo')))
print('{:.4f}'.format(log_loss(y_vl, y_pred)))
# Predict on the test data
y_pred = model.predict_proba(ts_vect)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_te == y_pred_max) / len(y_te)
print('{:.4f}'.format(accuracy))
print('{:.4f}'.format(roc_auc_score(y_te, y_pred, multi_class='ovo')))
print('{:.4f}'.format(log_loss(y_te, y_pred)))
```
# GloVe
```
# Download GloVe
!wget https://nlp.stanford.edu/data/glove.6B.zip
!unzip glove.6B.zip
EMBEDDING_FILE='./glove.6B.300d.txt'
# Read the glove word vectors (space delimited strings) into a dictionary from word->vector.
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.strip().split()) for o in open(EMBEDDING_FILE))
from keras.preprocessing.text import Tokenizer
embedding_dict={}
with open(EMBEDDING_FILE,'r') as f:
for line in f:
values=line.split()
word=values[0]
vectors=np.asarray(values[1:],'float32')
embedding_dict[word]=vectors
f.close()
class SWEM_Glove():
"""
Simple Word-Embeddingbased Models (SWEM)
https://arxiv.org/abs/1805.09843v1
"""
def __init__(self, dic, tokenizer, oov_initialize_range=(-0.01, 0.01)):
self.tokenizer = tokenizer
self.dic = dic
self.embedding_dim = self.dic['a'].shape[0]
self.oov_initialize_range = oov_initialize_range
if self.oov_initialize_range[0] > self.oov_initialize_range[1]:
raise ValueError("Specify valid initialize range: "
f"[{self.oov_initialize_range[0]}, {self.oov_initialize_range[1]}]")
def get_word_embeddings(self, text):
np.random.seed(abs(hash(text)) % (10 ** 8))
vectors = []
for word in text.split():
if word in self.dic:
vectors.append(self.dic[word])
else:
vectors.append(np.random.uniform(self.oov_initialize_range[0],
self.oov_initialize_range[1],
self.embedding_dim))
return np.array(vectors)
def average_pooling(self, text):
word_embeddings = self.get_word_embeddings(text)
return np.mean(word_embeddings, axis=0)
tokenizer = Tokenizer()  # instantiate the Keras tokenizer (SWEM_Glove stores it but splits text on whitespace itself)
swem = SWEM_Glove(embedding_dict, tokenizer)
# Word ngram vector
tr_vect = np.array([swem.average_pooling(text) for text in df_train['TITLE'].tolist()])
vl_vect = np.array([swem.average_pooling(text) for text in df_valid['TITLE'].tolist()])
ts_vect = np.array([swem.average_pooling(text) for text in df_test['TITLE'].tolist()])
model = lgb.LGBMClassifier()
model.fit(tr_vect, y_tr)
# Predict on the validation data
y_pred = model.predict_proba(vl_vect)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_vl == y_pred_max) / len(y_vl)
print('{:.4f}'.format(accuracy))
print('{:.4f}'.format(roc_auc_score(y_vl, y_pred, multi_class='ovo')))
print('{:.4f}'.format(log_loss(y_vl, y_pred)))
# Predict on the test data
y_pred = model.predict_proba(ts_vect)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_te == y_pred_max) / len(y_te)
print('{:.4f}'.format(accuracy))
print('{:.4f}'.format(roc_auc_score(y_te, y_pred, multi_class='ovo')))
print('{:.4f}'.format(log_loss(y_te, y_pred)))
```
# FastText
```
from gensim.models import FastText
import fasttext
!ls input/
#model2 = FastText.load_fasttext_format('cc.en.300.bin')
FASTTEXT_MODEL_BIN = "input/cc.en.300.bin"
#this works
ft_model = fasttext.load_model(FASTTEXT_MODEL_BIN)
ft_model.get_word_vector("additional").shape
from keras.preprocessing.text import Tokenizer
tokenizer = Tokenizer()
class SWEM_FastText():
"""
Simple Word-Embeddingbased Models (SWEM)
https://arxiv.org/abs/1805.09843v1
"""
def __init__(self, dic, tokenizer, oov_initialize_range=(-0.01, 0.01)):
self.tokenizer = tokenizer
self.dic = dic
self.embedding_dim = self.dic['a'].shape[0]
self.oov_initialize_range = oov_initialize_range
if self.oov_initialize_range[0] > self.oov_initialize_range[1]:
raise ValueError("Specify valid initialize range: "
f"[{self.oov_initialize_range[0]}, {self.oov_initialize_range[1]}]")
def get_word_embeddings(self, text):
np.random.seed(abs(hash(text)) % (10 ** 8))
vectors = []
for word in text.split():
if word in self.dic:
vectors.append(self.dic[word])
else:
vectors.append(np.random.uniform(self.oov_initialize_range[0],
self.oov_initialize_range[1],
self.embedding_dim))
return np.array(vectors)
def average_pooling(self, text):
word_embeddings = self.get_word_embeddings(text)
return np.mean(word_embeddings, axis=0)
swem = SWEM_FastText(ft_model, tokenizer)
# Word ngram vector
tr_vect = np.array([swem.average_pooling(text) for text in df_train['TITLE'].tolist()])
vl_vect = np.array([swem.average_pooling(text) for text in df_valid['TITLE'].tolist()])
ts_vect = np.array([swem.average_pooling(text) for text in df_test['TITLE'].tolist()])
le = LabelEncoder()
y_tr = le.fit_transform(df_train['CATEGORY'].values)
y_vl = le.transform(df_valid['CATEGORY'].values)
y_te = le.transform(df_test['CATEGORY'].values)
model = lgb.LGBMClassifier()
model.fit(tr_vect, y_tr)
# Predict on the validation data
y_pred = model.predict_proba(vl_vect)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_vl == y_pred_max) / len(y_vl)
print('{:.4f}'.format(accuracy))
print('{:.4f}'.format(roc_auc_score(y_vl, y_pred, multi_class='ovo')))
print('{:.4f}'.format(log_loss(y_vl, y_pred)))
# Predict on the test data
y_pred = model.predict_proba(ts_vect)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_te == y_pred_max) / len(y_te)
print('{:.4f}'.format(accuracy))
print('{:.4f}'.format(roc_auc_score(y_te, y_pred, multi_class='ovo')))
print('{:.4f}'.format(log_loss(y_te, y_pred)))
```
|
github_jupyter
|
import lightgbm as lgb
import sklearn
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import log_loss,confusion_matrix,classification_report,roc_curve,auc,accuracy_score,roc_auc_score
from sklearn.model_selection import StratifiedKFold
# Onehot encoding
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from scipy import sparse
import gc
import os
import random
def seed_everything(seed: int = 42):
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
#torch.manual_seed(seed)
#torch.cuda.manual_seed(seed)
#torch.backends.cudnn.deterministic = True
#torch.backends.cudnn.benchmark = False
seed_everything()
'''
!mkdir input/
%cd input/
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/00359/NewsAggregatorDataset.zip
!unzip NewsAggregatorDataset.zip
'''
# Replace double quotes with single quotes to avoid errors when reading the file
#!sed -e 's/"/'\''/g' ./input/newsCorpora.csv > ./input/newsCorpora_re.csv
#%cd ..
# Load the data
df = pd.read_csv('./input/newsCorpora_re.csv', header=None, sep='\t', names=['ID', 'TITLE', 'URL', 'PUBLISHER', 'CATEGORY', 'STORY', 'HOSTNAME', 'TIMESTAMP'])
# Keep only the target publishers and columns
df = df.loc[df['PUBLISHER'].isin(['Reuters', 'Huffington Post', 'Businessweek', 'Contactmusic.com', 'Daily Mail']), ['TITLE', 'CATEGORY']]
df.head()
# Split the data
df_train, df_valid_test = train_test_split(df, test_size=0.2, shuffle=True, random_state=123, stratify=df['CATEGORY'])
df_valid, df_test = train_test_split(df_valid_test, test_size=0.5, shuffle=True, random_state=123, stratify=df_valid_test['CATEGORY'])
df_train.reset_index(drop=True, inplace=True)
df_valid.reset_index(drop=True, inplace=True)
df_test.reset_index(drop=True, inplace=True)
print(df_train.head())
vect_word = TfidfVectorizer(max_features=20000, lowercase=True, analyzer='word',
stop_words= None,ngram_range=(1,3),dtype=np.float32)
vect_char = TfidfVectorizer(max_features=40000, lowercase=True, analyzer='char',
stop_words=None,ngram_range=(3,6),dtype=np.float32)
# Word ngram vector
tr_vect = vect_word.fit_transform(df_train['TITLE'])
vl_vect = vect_word.transform(df_valid['TITLE'])
ts_vect = vect_word.transform(df_test['TITLE'])
# Character n gram vector
tr_vect_char = vect_char.fit_transform(df_train['TITLE'])
vl_vect_char = vect_char.transform(df_valid['TITLE'])
ts_vect_char = vect_char.transform(df_test['TITLE'])
gc.collect()
tr_vect.shape
X = sparse.hstack([tr_vect, tr_vect_char])
x_val = sparse.hstack([vl_vect, vl_vect_char])
x_test = sparse.hstack([ts_vect, ts_vect_char])
le = LabelEncoder()
y_tr = le.fit_transform(df_train['CATEGORY'].values)
y_vl = le.transform(df_valid['CATEGORY'].values)
y_te = le.transform(df_test['CATEGORY'].values)
svd = TruncatedSVD(n_components=300, random_state=42)
X = svd.fit_transform(tr_vect)
x_val = svd.transform(vl_vect)
x_test = svd.transform(ts_vect)
X.shape
y_vl.shape
y_tr.shape
model = lgb.LGBMClassifier()
model.fit(X, y_tr)
# Predict on the validation data
y_pred = model.predict_proba(x_val)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_vl == y_pred_max) / len(y_vl)
print(accuracy)
print(roc_auc_score(y_vl, y_pred, multi_class='ovo'))
print(log_loss(y_vl, y_pred))
# Predict on the test data
y_pred = model.predict_proba(x_test)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_te == y_pred_max) / len(y_te)
print(accuracy)
print(roc_auc_score(y_te, y_pred, multi_class='ovo'))
print(log_loss(y_te, y_pred))
!pip freeze > requirements.lock
import gensim.downloader as api
wv = api.load('word2vec-google-news-300')
class SWEM():
"""
Simple Word-Embeddingbased Models (SWEM)
https://arxiv.org/abs/1805.09843v1
"""
def __init__(self, w2v, tokenizer, oov_initialize_range=(-0.01, 0.01)):
self.w2v = w2v
self.tokenizer = tokenizer
self.vocab = set(self.w2v.vocab.keys())
self.embedding_dim = self.w2v.vector_size
self.oov_initialize_range = oov_initialize_range
if self.oov_initialize_range[0] > self.oov_initialize_range[1]:
raise ValueError("Specify valid initialize range: "
f"[{self.oov_initialize_range[0]}, {self.oov_initialize_range[1]}]")
def get_word_embeddings(self, text):
np.random.seed(abs(hash(text)) % (10 ** 8))
vectors = []
for word in self.tokenizer(text):
if word in self.vocab:
vectors.append(self.w2v[word])
else:
vectors.append(np.random.uniform(self.oov_initialize_range[0],
self.oov_initialize_range[1],
self.embedding_dim))
return np.array(vectors)
def average_pooling(self, text):
word_embeddings = self.get_word_embeddings(text)
return np.mean(word_embeddings, axis=0)
def max_pooling(self, text):
word_embeddings = self.get_word_embeddings(text)
return np.max(word_embeddings, axis=0)
def concat_average_max_pooling(self, text):
word_embeddings = self.get_word_embeddings(text)
return np.r_[np.mean(word_embeddings, axis=0), np.max(word_embeddings, axis=0)]
def hierarchical_pooling(self, text, n):
word_embeddings = self.get_word_embeddings(text)
text_len = word_embeddings.shape[0]
if n > text_len:
raise ValueError(f"window size must be less than text length / window_size:{n} text_length:{text_len}")
window_average_pooling_vec = [np.mean(word_embeddings[i:i + n], axis=0) for i in range(text_len - n + 1)]
return np.max(window_average_pooling_vec, axis=0)
from gensim.models import KeyedVectors
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
nlp = English()
swem = SWEM(wv, nlp.tokenizer)
# Word ngram vector
tr_vect = np.array([swem.average_pooling(text) for text in df_train['TITLE'].tolist()])
vl_vect = np.array([swem.average_pooling(text) for text in df_valid['TITLE'].tolist()])
ts_vect = np.array([swem.average_pooling(text) for text in df_test['TITLE'].tolist()])
tr_vect.shape
y_tr.shape
pd.Series(y_tr).value_counts()
model = lgb.LGBMClassifier()
model.fit(tr_vect, y_tr)
# Predict on the validation data
y_pred = model.predict_proba(vl_vect)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_vl == y_pred_max) / len(y_vl)
print('{:.4f}'.format(accuracy))
print('{:.4f}'.format(roc_auc_score(y_vl, y_pred, multi_class='ovo')))
print('{:.4f}'.format(log_loss(y_vl, y_pred)))
# Predict on the test data
y_pred = model.predict_proba(ts_vect)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_te == y_pred_max) / len(y_te)
print('{:.4f}'.format(accuracy))
print('{:.4f}'.format(roc_auc_score(y_te, y_pred, multi_class='ovo')))
print('{:.4f}'.format(log_loss(y_te, y_pred)))
# Download GloVe
!wget https://nlp.stanford.edu/data/glove.6B.zip
!unzip glove.6B.zip
EMBEDDING_FILE='./glove.6B.300d.txt'
# Read the glove word vectors (space delimited strings) into a dictionary from word->vector.
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.strip().split()) for o in open(EMBEDDING_FILE))
from keras.preprocessing.text import Tokenizer
embedding_dict={}
with open(EMBEDDING_FILE,'r') as f:
for line in f:
values=line.split()
word=values[0]
vectors=np.asarray(values[1:],'float32')
embedding_dict[word]=vectors
f.close()
class SWEM_Glove():
"""
Simple Word-Embeddingbased Models (SWEM)
https://arxiv.org/abs/1805.09843v1
"""
def __init__(self, dic, tokenizer, oov_initialize_range=(-0.01, 0.01)):
self.tokenizer = tokenizer
self.dic = dic
self.embedding_dim = self.dic['a'].shape[0]
self.oov_initialize_range = oov_initialize_range
if self.oov_initialize_range[0] > self.oov_initialize_range[1]:
raise ValueError("Specify valid initialize range: "
f"[{self.oov_initialize_range[0]}, {self.oov_initialize_range[1]}]")
def get_word_embeddings(self, text):
np.random.seed(abs(hash(text)) % (10 ** 8))
vectors = []
for word in text.split():
if word in self.dic:
vectors.append(self.dic[word])
else:
vectors.append(np.random.uniform(self.oov_initialize_range[0],
self.oov_initialize_range[1],
self.embedding_dim))
return np.array(vectors)
def average_pooling(self, text):
word_embeddings = self.get_word_embeddings(text)
return np.mean(word_embeddings, axis=0)
tokenizer = Tokenizer()  # instantiate the Keras tokenizer (SWEM_Glove stores it but splits text on whitespace itself)
swem = SWEM_Glove(embedding_dict, tokenizer)
# Word ngram vector
tr_vect = np.array([swem.average_pooling(text) for text in df_train['TITLE'].tolist()])
vl_vect = np.array([swem.average_pooling(text) for text in df_valid['TITLE'].tolist()])
ts_vect = np.array([swem.average_pooling(text) for text in df_test['TITLE'].tolist()])
model = lgb.LGBMClassifier()
model.fit(tr_vect, y_tr)
# Predict on the validation data
y_pred = model.predict_proba(vl_vect)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_vl == y_pred_max) / len(y_vl)
print('{:.4f}'.format(accuracy))
print('{:.4f}'.format(roc_auc_score(y_vl, y_pred, multi_class='ovo')))
print('{:.4f}'.format(log_loss(y_vl, y_pred)))
# Predict on the test data
y_pred = model.predict_proba(ts_vect)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_te == y_pred_max) / len(y_te)
print('{:.4f}'.format(accuracy))
print('{:.4f}'.format(roc_auc_score(y_te, y_pred, multi_class='ovo')))
print('{:.4f}'.format(log_loss(y_te, y_pred)))
from gensim.models import FastText
import fasttext
!ls input/
#model2 = FastText.load_fasttext_format('cc.en.300.bin')
FASTTEXT_MODEL_BIN = "input/cc.en.300.bin"
#this works
ft_model = fasttext.load_model(FASTTEXT_MODEL_BIN)
ft_model.get_word_vector("additional").shape
from keras.preprocessing.text import Tokenizer
tokenizer = Tokenizer()
class SWEM_FastText():
"""
Simple Word-Embeddingbased Models (SWEM)
https://arxiv.org/abs/1805.09843v1
"""
def __init__(self, dic, tokenizer, oov_initialize_range=(-0.01, 0.01)):
self.tokenizer = tokenizer
self.dic = dic
self.embedding_dim = self.dic['a'].shape[0]
self.oov_initialize_range = oov_initialize_range
if self.oov_initialize_range[0] > self.oov_initialize_range[1]:
raise ValueError("Specify valid initialize range: "
f"[{self.oov_initialize_range[0]}, {self.oov_initialize_range[1]}]")
def get_word_embeddings(self, text):
np.random.seed(abs(hash(text)) % (10 ** 8))
vectors = []
for word in text.split():
if word in self.dic:
vectors.append(self.dic[word])
else:
vectors.append(np.random.uniform(self.oov_initialize_range[0],
self.oov_initialize_range[1],
self.embedding_dim))
return np.array(vectors)
def average_pooling(self, text):
word_embeddings = self.get_word_embeddings(text)
return np.mean(word_embeddings, axis=0)
swem = SWEM_FastText(ft_model, tokenizer)
# Word ngram vector
tr_vect = np.array([swem.average_pooling(text) for text in df_train['TITLE'].tolist()])
vl_vect = np.array([swem.average_pooling(text) for text in df_valid['TITLE'].tolist()])
ts_vect = np.array([swem.average_pooling(text) for text in df_test['TITLE'].tolist()])
le = LabelEncoder()
y_tr = le.fit_transform(df_train['CATEGORY'].values)
y_vl = le.transform(df_valid['CATEGORY'].values)
y_te = le.transform(df_test['CATEGORY'].values)
model = lgb.LGBMClassifier()
model.fit(tr_vect, y_tr)
# Predict on the validation data
y_pred = model.predict_proba(vl_vect)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_vl == y_pred_max) / len(y_vl)
print('{:.4f}'.format(accuracy))
print('{:.4f}'.format(roc_auc_score(y_vl, y_pred, multi_class='ovo')))
print('{:.4f}'.format(log_loss(y_vl, y_pred)))
# Predict on the test data
y_pred = model.predict_proba(ts_vect)
y_pred_max = np.argmax(y_pred, axis=1) # take the class with the highest predicted probability
accuracy = sum(y_te == y_pred_max) / len(y_te)
print('{:.4f}'.format(accuracy))
print('{:.4f}'.format(roc_auc_score(y_te, y_pred, multi_class='ovo')))
print('{:.4f}'.format(log_loss(y_te, y_pred)))
| 0.643217 | 0.366817 |
**Chapter 10 – Introduction to Artificial Neural Networks with Keras**
_This notebook contains all the sample code and solutions to the exercises in chapter 10._
<table align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/10_neural_nets_with_keras.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
</table>
# Setup
First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
```
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
# TensorFlow ≥2.0 is required
import tensorflow as tf
assert tf.__version__ >= "2.0"
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "ann"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
```
# Perceptrons
**Note**: we set `max_iter` and `tol` explicitly to avoid warnings about the fact that their default value will change in future versions of Scikit-Learn.
```
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
X = iris.data[:, (2, 3)] # petal length, petal width
y = (iris.target == 0).astype(int)  # note: np.int was removed in recent NumPy releases
per_clf = Perceptron(max_iter=1000, tol=1e-3, random_state=42)
per_clf.fit(X, y)
y_pred = per_clf.predict([[2, 0.5]])
y_pred
a = -per_clf.coef_[0][0] / per_clf.coef_[0][1]
b = -per_clf.intercept_ / per_clf.coef_[0][1]
axes = [0, 5, 0, 2]
x0, x1 = np.meshgrid(
np.linspace(axes[0], axes[1], 500).reshape(-1, 1),
np.linspace(axes[2], axes[3], 200).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_predict = per_clf.predict(X_new)
zz = y_predict.reshape(x0.shape)
plt.figure(figsize=(10, 4))
plt.plot(X[y==0, 0], X[y==0, 1], "bs", label="Not Iris-Setosa")
plt.plot(X[y==1, 0], X[y==1, 1], "yo", label="Iris-Setosa")
plt.plot([axes[0], axes[1]], [a * axes[0] + b, a * axes[1] + b], "k-", linewidth=3)
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#9898ff', '#fafab0'])
plt.contourf(x0, x1, zz, cmap=custom_cmap)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="lower right", fontsize=14)
plt.axis(axes)
save_fig("perceptron_iris_plot")
plt.show()
```
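As an aside, the `Perceptron` class is essentially a convenience wrapper: you should get the same kind of decision boundary from an `SGDClassifier` trained with the perceptron loss, a constant learning rate of 1 and no regularization. A quick comparison sketch:

```
from sklearn.linear_model import SGDClassifier

sgd_clf = SGDClassifier(loss="perceptron", learning_rate="constant",
                        eta0=1, penalty=None, random_state=42)
sgd_clf.fit(X, y)
sgd_clf.predict([[2, 0.5]])
```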
# Activation functions
```
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def relu(z):
return np.maximum(0, z)
def derivative(f, z, eps=0.000001):
return (f(z + eps) - f(z - eps))/(2 * eps)
z = np.linspace(-5, 5, 200)
plt.figure(figsize=(11,4))
plt.subplot(121)
plt.plot(z, np.sign(z), "r-", linewidth=1, label="Step")
plt.plot(z, sigmoid(z), "g--", linewidth=2, label="Sigmoid")
plt.plot(z, np.tanh(z), "b-", linewidth=2, label="Tanh")
plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
plt.legend(loc="center right", fontsize=14)
plt.title("Activation functions", fontsize=14)
plt.axis([-5, 5, -1.2, 1.2])
plt.subplot(122)
plt.plot(z, derivative(np.sign, z), "r-", linewidth=1, label="Step")
plt.plot(0, 0, "ro", markersize=5)
plt.plot(0, 0, "rx", markersize=10)
plt.plot(z, derivative(sigmoid, z), "g--", linewidth=2, label="Sigmoid")
plt.plot(z, derivative(np.tanh, z), "b-", linewidth=2, label="Tanh")
plt.plot(z, derivative(relu, z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
#plt.legend(loc="center right", fontsize=14)
plt.title("Derivatives", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
save_fig("activation_functions_plot")
plt.show()
def heaviside(z):
return (z >= 0).astype(z.dtype)
def mlp_xor(x1, x2, activation=heaviside):
return activation(-activation(x1 + x2 - 1.5) + activation(x1 + x2 - 0.5) - 0.5)
x1s = np.linspace(-0.2, 1.2, 100)
x2s = np.linspace(-0.2, 1.2, 100)
x1, x2 = np.meshgrid(x1s, x2s)
z1 = mlp_xor(x1, x2, activation=heaviside)
z2 = mlp_xor(x1, x2, activation=sigmoid)
plt.figure(figsize=(10,4))
plt.subplot(121)
plt.contourf(x1, x2, z1)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("Activation function: heaviside", fontsize=14)
plt.grid(True)
plt.subplot(122)
plt.contourf(x1, x2, z2)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("Activation function: sigmoid", fontsize=14)
plt.grid(True)
```
# Building an Image Classifier
First let's import TensorFlow and Keras.
```
import tensorflow as tf
from tensorflow import keras
tf.__version__
keras.__version__
```
Let's start by loading the fashion MNIST dataset. Keras has a number of functions to load popular datasets in `keras.datasets`. The dataset is already split for you between a training set and a test set, but it can be useful to split the training set further to have a validation set:
```
fashion_mnist = keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data()
```
The training set contains 60,000 grayscale images, each 28x28 pixels:
```
X_train_full.shape
```
Each pixel intensity is represented as a byte (0 to 255):
```
X_train_full.dtype
```
Let's split the full training set into a validation set and a (smaller) training set. We also scale the pixel intensities down to the 0-1 range and convert them to floats, by dividing by 255.
```
X_valid, X_train = X_train_full[:5000] / 255., X_train_full[5000:] / 255.
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test / 255.
```
You can plot an image using Matplotlib's `imshow()` function, with a `'binary'`
color map:
```
plt.imshow(X_train[0], cmap="binary")
plt.axis('off')
plt.show()
```
The labels are the class IDs (represented as uint8), from 0 to 9:
```
y_train
```
Here are the corresponding class names:
```
class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
"Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
```
So the first image in the training set is a coat:
```
class_names[y_train[0]]
```
The validation set contains 5,000 images, and the test set contains 10,000 images:
```
X_valid.shape
X_test.shape
```
Let's take a look at a sample of the images in the dataset:
```
n_rows = 4
n_cols = 10
plt.figure(figsize=(n_cols * 1.2, n_rows * 1.2))
for row in range(n_rows):
for col in range(n_cols):
index = n_cols * row + col
plt.subplot(n_rows, n_cols, index + 1)
plt.imshow(X_train[index], cmap="binary", interpolation="nearest")
plt.axis('off')
plt.title(class_names[y_train[index]], fontsize=12)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
save_fig('fashion_mnist_plot', tight_layout=False)
plt.show()
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="relu"))
model.add(keras.layers.Dense(100, activation="relu"))
model.add(keras.layers.Dense(10, activation="softmax"))
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="relu"),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.layers
model.summary()
keras.utils.plot_model(model, "my_fashion_mnist_model.png", show_shapes=True)
hidden1 = model.layers[1]
hidden1.name
model.get_layer(hidden1.name) is hidden1
weights, biases = hidden1.get_weights()
weights
weights.shape
biases
biases.shape
model.compile(loss="sparse_categorical_crossentropy",
optimizer="sgd",
metrics=["accuracy"])
```
This is equivalent to:
```python
model.compile(loss=keras.losses.sparse_categorical_crossentropy,
optimizer=keras.optimizers.SGD(),
metrics=[keras.metrics.sparse_categorical_accuracy])
```
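As a side note, the `sparse_` variant is used here because the labels are integer class IDs; if each label were a one-hot vector you would use `"categorical_crossentropy"` instead. A minimal illustrative sketch of the conversion (the variable name `y_onehot_example` is just for this example):
```python
# Hypothetical example: turning integer class IDs into one-hot vectors
# (with one-hot targets you would compile with loss="categorical_crossentropy")
y_onehot_example = keras.utils.to_categorical([0, 5, 1, 0], num_classes=10)
y_onehot_example
```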
```
history = model.fit(X_train, y_train, epochs=30,
validation_data=(X_valid, y_valid))
history.params
print(history.epoch)
history.history.keys()
import pandas as pd
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
save_fig("keras_learning_curves_plot")
plt.show()
model.evaluate(X_test, y_test)
X_new = X_test[:3]
y_proba = model.predict(X_new)
y_proba.round(2)
```
**Warning**: `model.predict_classes(X_new)` is deprecated. It is replaced with `np.argmax(model.predict(X_new), axis=-1)`.
```
#y_pred = model.predict_classes(X_new) # deprecated
y_pred = np.argmax(model.predict(X_new), axis=-1)
y_pred
np.array(class_names)[y_pred]
y_new = y_test[:3]
y_new
plt.figure(figsize=(7.2, 2.4))
for index, image in enumerate(X_new):
plt.subplot(1, 3, index + 1)
plt.imshow(image, cmap="binary", interpolation="nearest")
plt.axis('off')
plt.title(class_names[y_test[index]], fontsize=12)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
save_fig('fashion_mnist_images_plot', tight_layout=False)
plt.show()
```
# Regression MLP
Let's load, split and scale the California housing dataset (the original one, not the modified one as in chapter 2):
```
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
housing = fetch_california_housing()
X_train_full, X_test, y_train_full, y_test = train_test_split(housing.data, housing.target, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, random_state=42)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
X_test = scaler.transform(X_test)
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=X_train.shape[1:]),
keras.layers.Dense(1)
])
model.compile(loss="mean_squared_error", optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid))
mse_test = model.evaluate(X_test, y_test)
X_new = X_test[:3]
y_pred = model.predict(X_new)
plt.plot(pd.DataFrame(history.history))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
y_pred
```
# Functional API
Not all neural network models are simply sequential. Some may have complex topologies. Some may have multiple inputs and/or multiple outputs. For example, a Wide & Deep neural network (see [paper](https://ai.google/research/pubs/pub45413)) connects all or part of the inputs directly to the output layer.
```
np.random.seed(42)
tf.random.set_seed(42)
input_ = keras.layers.Input(shape=X_train.shape[1:])
hidden1 = keras.layers.Dense(30, activation="relu")(input_)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_, hidden2])
output = keras.layers.Dense(1)(concat)
model = keras.models.Model(inputs=[input_], outputs=[output])
model.summary()
model.compile(loss="mean_squared_error", optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit(X_train, y_train, epochs=20,
validation_data=(X_valid, y_valid))
mse_test = model.evaluate(X_test, y_test)
y_pred = model.predict(X_new)
```
What if you want to send different subsets of input features through the wide or deep paths? We will send 5 features through the wide path (features 0 to 4), and 6 through the deep path (features 2 to 7). Note that 3 features will go through both (features 2, 3 and 4).
```
np.random.seed(42)
tf.random.set_seed(42)
input_A = keras.layers.Input(shape=[5], name="wide_input")
input_B = keras.layers.Input(shape=[6], name="deep_input")
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_A, hidden2])
output = keras.layers.Dense(1, name="output")(concat)
model = keras.models.Model(inputs=[input_A, input_B], outputs=[output])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
X_train_A, X_train_B = X_train[:, :5], X_train[:, 2:]
X_valid_A, X_valid_B = X_valid[:, :5], X_valid[:, 2:]
X_test_A, X_test_B = X_test[:, :5], X_test[:, 2:]
X_new_A, X_new_B = X_test_A[:3], X_test_B[:3]
history = model.fit((X_train_A, X_train_B), y_train, epochs=20,
validation_data=((X_valid_A, X_valid_B), y_valid))
mse_test = model.evaluate((X_test_A, X_test_B), y_test)
y_pred = model.predict((X_new_A, X_new_B))
```
Adding an auxiliary output for regularization:
```
np.random.seed(42)
tf.random.set_seed(42)
input_A = keras.layers.Input(shape=[5], name="wide_input")
input_B = keras.layers.Input(shape=[6], name="deep_input")
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_A, hidden2])
output = keras.layers.Dense(1, name="main_output")(concat)
aux_output = keras.layers.Dense(1, name="aux_output")(hidden2)
model = keras.models.Model(inputs=[input_A, input_B],
outputs=[output, aux_output])
model.compile(loss=["mse", "mse"], loss_weights=[0.9, 0.1], optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit([X_train_A, X_train_B], [y_train, y_train], epochs=20,
validation_data=([X_valid_A, X_valid_B], [y_valid, y_valid]))
total_loss, main_loss, aux_loss = model.evaluate(
[X_test_A, X_test_B], [y_test, y_test])
y_pred_main, y_pred_aux = model.predict([X_new_A, X_new_B])
```
# The subclassing API
```
class WideAndDeepModel(keras.models.Model):
def __init__(self, units=30, activation="relu", **kwargs):
super().__init__(**kwargs)
self.hidden1 = keras.layers.Dense(units, activation=activation)
self.hidden2 = keras.layers.Dense(units, activation=activation)
self.main_output = keras.layers.Dense(1)
self.aux_output = keras.layers.Dense(1)
def call(self, inputs):
input_A, input_B = inputs
hidden1 = self.hidden1(input_B)
hidden2 = self.hidden2(hidden1)
concat = keras.layers.concatenate([input_A, hidden2])
main_output = self.main_output(concat)
aux_output = self.aux_output(hidden2)
return main_output, aux_output
model = WideAndDeepModel(30, activation="relu")
model.compile(loss="mse", loss_weights=[0.9, 0.1], optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit((X_train_A, X_train_B), (y_train, y_train), epochs=10,
validation_data=((X_valid_A, X_valid_B), (y_valid, y_valid)))
total_loss, main_loss, aux_loss = model.evaluate((X_test_A, X_test_B), (y_test, y_test))
y_pred_main, y_pred_aux = model.predict((X_new_A, X_new_B))
```
# Saving and Restoring
```
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=[8]),
keras.layers.Dense(30, activation="relu"),
keras.layers.Dense(1)
])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
mse_test = model.evaluate(X_test, y_test)
model.save("my_keras_model.h5")
model = keras.models.load_model("my_keras_model.h5")
model.predict(X_new)
model.save_weights("my_keras_weights.ckpt")
model.load_weights("my_keras_weights.ckpt")
```
# Using Callbacks during Training
```
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=[8]),
keras.layers.Dense(30, activation="relu"),
keras.layers.Dense(1)
])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
checkpoint_cb = keras.callbacks.ModelCheckpoint("my_keras_model.h5", save_best_only=True)
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid),
callbacks=[checkpoint_cb])
model = keras.models.load_model("my_keras_model.h5") # rollback to best model
mse_test = model.evaluate(X_test, y_test)
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
early_stopping_cb = keras.callbacks.EarlyStopping(patience=10,
restore_best_weights=True)
history = model.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=[checkpoint_cb, early_stopping_cb])
mse_test = model.evaluate(X_test, y_test)
class PrintValTrainRatioCallback(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
print("\nval/train: {:.2f}".format(logs["val_loss"] / logs["loss"]))
val_train_ratio_cb = PrintValTrainRatioCallback()
history = model.fit(X_train, y_train, epochs=1,
validation_data=(X_valid, y_valid),
callbacks=[val_train_ratio_cb])
```
# TensorBoard
```
root_logdir = os.path.join(os.curdir, "my_logs")
def get_run_logdir():
import time
run_id = time.strftime("run_%Y_%m_%d-%H_%M_%S")
return os.path.join(root_logdir, run_id)
run_logdir = get_run_logdir()
run_logdir
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=[8]),
keras.layers.Dense(30, activation="relu"),
keras.layers.Dense(1)
])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
history = model.fit(X_train, y_train, epochs=30,
validation_data=(X_valid, y_valid),
callbacks=[checkpoint_cb, tensorboard_cb])
```
To start the TensorBoard server, one option is to open a terminal, if needed activate the virtualenv where you installed TensorBoard, go to this notebook's directory, then type:
```bash
$ tensorboard --logdir=./my_logs --port=6006
```
You can then open your web browser to [localhost:6006](http://localhost:6006) and use TensorBoard. Once you are done, press Ctrl-C in the terminal window; this will shut down the TensorBoard server.
Alternatively, you can load TensorBoard's Jupyter extension and run it like this:
```
%load_ext tensorboard
%tensorboard --logdir=./my_logs --port=6006
run_logdir2 = get_run_logdir()
run_logdir2
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=[8]),
keras.layers.Dense(30, activation="relu"),
keras.layers.Dense(1)
])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=0.05))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir2)
history = model.fit(X_train, y_train, epochs=30,
validation_data=(X_valid, y_valid),
callbacks=[checkpoint_cb, tensorboard_cb])
```
Notice how TensorBoard now sees two runs, and you can compare the learning curves.
Check out the other available logging options:
```
help(keras.callbacks.TensorBoard.__init__)
```
# Hyperparameter Tuning
```
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
def build_model(n_hidden=1, n_neurons=30, learning_rate=3e-3, input_shape=[8]):
model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=input_shape))
for layer in range(n_hidden):
model.add(keras.layers.Dense(n_neurons, activation="relu"))
model.add(keras.layers.Dense(1))
optimizer = keras.optimizers.SGD(lr=learning_rate)
model.compile(loss="mse", optimizer=optimizer)
return model
keras_reg = keras.wrappers.scikit_learn.KerasRegressor(build_model)
keras_reg.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=[keras.callbacks.EarlyStopping(patience=10)])
mse_test = keras_reg.score(X_test, y_test)
y_pred = keras_reg.predict(X_new)
np.random.seed(42)
tf.random.set_seed(42)
```
**Warning**: the following cell crashes at the end of training. This seems to be caused by [Keras issue #13586](https://github.com/keras-team/keras/issues/13586), which was triggered by a recent change in Scikit-Learn. [Pull Request #13598](https://github.com/keras-team/keras/pull/13598) seems to fix the issue, so this problem should be resolved soon. In the meantime, I've added `.tolist()` and `.rvs(1000).tolist()` as workarounds.
```
from scipy.stats import reciprocal
from sklearn.model_selection import RandomizedSearchCV
param_distribs = {
"n_hidden": [0, 1, 2, 3],
"n_neurons": np.arange(1, 100) .tolist(),
"learning_rate": reciprocal(3e-4, 3e-2) .rvs(1000).tolist(),
}
rnd_search_cv = RandomizedSearchCV(keras_reg, param_distribs, n_iter=10, cv=3, verbose=2)
rnd_search_cv.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=[keras.callbacks.EarlyStopping(patience=10)])
rnd_search_cv.best_params_
rnd_search_cv.best_score_
rnd_search_cv.best_estimator_
rnd_search_cv.score(X_test, y_test)
model = rnd_search_cv.best_estimator_.model
model
model.evaluate(X_test, y_test)
```
# Exercise solutions
## 1. to 9.
See appendix A.
## 10.
*Exercise: Train a deep MLP on the MNIST dataset (you can load it using `keras.datasets.mnist.load_data()`). See if you can get over 98% precision. Try searching for the optimal learning rate by using the approach presented in this chapter (i.e., by growing the learning rate exponentially, plotting the loss, and finding the point where the loss shoots up). Try adding all the bells and whistles—save checkpoints, use early stopping, and plot learning curves using TensorBoard.*
Let's load the dataset:
```
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
```
Just like for the Fashion MNIST dataset, the MNIST training set contains 60,000 grayscale images, each 28x28 pixels:
```
X_train_full.shape
```
Each pixel intensity is also represented as a byte (0 to 255):
```
X_train_full.dtype
```
Let's split the full training set into a validation set and a (smaller) training set. We also scale the pixel intensities down to the 0-1 range and convert them to floats, by dividing by 255, just like we did for Fashion MNIST:
```
X_valid, X_train = X_train_full[:5000] / 255., X_train_full[5000:] / 255.
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test / 255.
```
Let's plot an image using Matplotlib's `imshow()` function, with a `'binary'`
color map:
```
plt.imshow(X_train[0], cmap="binary")
plt.axis('off')
plt.show()
```
The labels are the class IDs (represented as uint8), from 0 to 9. Conveniently, the class IDs correspond to the digits represented in the images, so we don't need a `class_names` array:
```
y_train
```
The validation set contains 5,000 images, and the test set contains 10,000 images:
```
X_valid.shape
X_test.shape
```
Let's take a look at a sample of the images in the dataset:
```
n_rows = 4
n_cols = 10
plt.figure(figsize=(n_cols * 1.2, n_rows * 1.2))
for row in range(n_rows):
for col in range(n_cols):
index = n_cols * row + col
plt.subplot(n_rows, n_cols, index + 1)
plt.imshow(X_train[index], cmap="binary", interpolation="nearest")
plt.axis('off')
plt.title(y_train[index], fontsize=12)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
plt.show()
```
Let's build a simple dense network and find the optimal learning rate. We will need a callback to grow the learning rate at each iteration. It will also record the learning rate and the loss at each iteration:
```
K = keras.backend
class ExponentialLearningRate(keras.callbacks.Callback):
def __init__(self, factor):
self.factor = factor
self.rates = []
self.losses = []
def on_batch_end(self, batch, logs):
self.rates.append(K.get_value(self.model.optimizer.lr))
self.losses.append(logs["loss"])
K.set_value(self.model.optimizer.lr, self.model.optimizer.lr * self.factor)
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="relu"),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
```
We will start with a small learning rate of 1e-3, and grow it by 0.5% at each iteration:
```
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
expon_lr = ExponentialLearningRate(factor=1.005)
```
Now let's train the model for just 1 epoch:
```
history = model.fit(X_train, y_train, epochs=1,
validation_data=(X_valid, y_valid),
callbacks=[expon_lr])
```
We can now plot the loss as a function of the learning rate:
```
plt.plot(expon_lr.rates, expon_lr.losses)
plt.gca().set_xscale('log')
plt.hlines(min(expon_lr.losses), min(expon_lr.rates), max(expon_lr.rates))
plt.axis([min(expon_lr.rates), max(expon_lr.rates), 0, expon_lr.losses[0]])
plt.grid()
plt.xlabel("Learning rate")
plt.ylabel("Loss")
```
The loss starts shooting back up violently when the learning rate goes over 6e-1, so let's try using half of that, at 3e-1:
```
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="relu"),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=3e-1),
metrics=["accuracy"])
run_index = 1 # increment this at every run
run_logdir = os.path.join(os.curdir, "my_mnist_logs", "run_{:03d}".format(run_index))
run_logdir
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
checkpoint_cb = keras.callbacks.ModelCheckpoint("my_mnist_model.h5", save_best_only=True)
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
history = model.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=[checkpoint_cb, early_stopping_cb, tensorboard_cb])
model = keras.models.load_model("my_mnist_model.h5") # rollback to best model
model.evaluate(X_test, y_test)
```
We got over 98% accuracy. Finally, let's look at the learning curves using TensorBoard:
```
%tensorboard --logdir=./my_mnist_logs --port=6006
```
<a href="https://colab.research.google.com/github/fadilaasetyabudi/Klinik/blob/master/PenyajianData.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# Data Presentation
```
**DATA PRESENTATION**
One of the topics in descriptive statistics is the technique of presenting data. The data we have collected and obtained should be well organized so that it can also be presented well. Well-presented data is understandable and meaningful to its users.
If the data obtained is very large, a good way to analyze it is to organize and present it in a concise, compact form, so that users can read the data quickly and easily. Data organization is the process of arranging data into groups or classes based on certain characteristics.
The goals of data organization are as follows:
1. Summarize raw data for statistical analysis
2. Reduce the complexity of the data and present its properties
3. Make it easy to compare the data and draw conclusions about it
4. Provide information about the relationships between elements within a group of data
5. Make it easy to separate data elements into similar groups and then group them based on the similarities and differences of those elements
(Agus Widarjono)
Data is presented in two forms:
1. Tabulation
2. Graphics
**Tabulation**
Presenting data in tabular form means presenting the data as tables. Before discussing the presentation itself, we need to discuss how to organize the data. One technique used to organize data is the frequency distribution.
*Frequency Distribution*
Constructing a frequency distribution is explained with the following example. We have data on the number of school dropouts in 34 provinces:
| No | Province | Male | Female | Total |
|----|---------------------------------|-------|--------|-------|
| 1 | Prov. D.K.I. Jakarta | 361 | 245 | 606 |
| 2 | Prov. Jawa Barat | 2,859 | 2,767 | 5,626 |
| 3 | Prov. Jawa Tengah | 1,369 | 1,249 | 2,618 |
| 4 | Prov. D.I. Yogyakarta | 138 | 122 | 260 |
| 5 | Prov. Jawa Timur | 2,160 | 1,831 | 3,991 |
| 6 | Prov. Aceh | 691 | 482 | 1,173 |
| 7 | Prov. Sumatera Utara | 1,955 | 1,364 | 3,319 |
| 8 | Prov. Sumatera Barat | 675 | 425 | 1,100 |
| 9 | Prov. Riau | 530 | 481 | 1,011 |
| 10 | Prov. Jambi | 232 | 196 | 428 |
| 11 | Prov. Sumatera Selatan | 962 | 842 | 1,804 |
| 12 | Prov. Lampung | 643 | 688 | 1,331 |
| 13 | Prov. Kalimantan Barat | 561 | 623 | 1,184 |
| 14 | Prov. Kalimantan Tengah | 218 | 228 | 446 |
| 15 | Prov. Kalimantan Selatan | 267 | 224 | 491 |
| 16 | Prov. Kalimantan Timur | 179 | 204 | 383 |
| 17 | Prov. Sulawesi Utara | 175 | 136 | 311 |
| 18 | Prov. Sulawesi Tengah | 194 | 210 | 404 |
| 19 | Prov. Sulawesi Selatan | 767 | 617 | 1,384 |
| 20 | Prov. Sulawesi Tenggara | 511 | 449 | 960 |
| 21 | Prov. Maluku | 149 | 125 | 274 |
| 22 | Prov. Bali | 257 | 249 | 506 |
| 23 | Prov. Nusa Tenggara Barat | 687 | 576 | 1,263 |
| 24 | Prov. Nusa Tenggara Timur | 1,272 | 984 | 2,256 |
| 25 | Prov. Papua | 195 | 196 | 391 |
| 26 | Prov. Bengkulu | 218 | 180 | 398 |
| 27 | Prov. Maluku Utara | 94 | 94 | 188 |
| 28 | Prov. Banten | 699 | 535 | 1,234 |
| 29 | Prov. Kepulauan Bangka Belitung | 132 | 125 | 257 |
| 30 | Prov. Gorontalo | 54 | 76 | 130 |
| 31 | Prov. Kepulauan Riau | 117 | 105 | 222 |
| 32 | Prov. Papua Barat | 117 | 81 | 198 |
| 33 | Prov. Sulawesi Barat | 103 | 100 | 203 |
| 34 | Prov. Kalimantan Utara | 32 | 37 | 69 |
The steps to construct a frequency distribution are as follows (a Python sketch of this procedure appears after the result table below):
1. Determine the Number of Class Intervals
To determine the number of class intervals (K), use this equation:
K = 1 + 3.3 log n
For the example above, K = 1 + 3.3 × log(34) ≈ 1 + 3.3 × 1.53 ≈ 6.05
So 6 or 7 class intervals could be used. Here we use 6 classes.
2. Calculate the Range of the Data
To calculate the range, subtract the smallest value from the largest value and add 1. The largest value is 5,626 and the smallest is 69.
So 5,626 - 69 + 1 = 5,558
3. Calculate the Class Width
The class width is the range divided by the number of class intervals: 5,558 / 6 = 926.33. Although the computed class width is 926.33, this table uses a class width of 1,000, which communicates better than a width of 926.33.
4. Arrange the Class Intervals
In theory the class intervals would start from the smallest value, 69. To make the table more readable, however, we start from 1.
5. Count the Frequencies Using Tally Marks
Once the class intervals are set up, go through the data one by one from beginning to end and mark each value in its class interval with a tally (|||| ||). For example, the value 69 gets a tally in the 1st class interval, while the value 5,626 gets a tally in the 6th class interval. Continue like this for all 34 values, which produces the table below. Once all the data has been entered, the tally column can be removed.
| Class No | Class Interval | Frequency (f) |
|----------|----------------|---------------|
| 1 | 1-1000 | 20 |
| 2 | 1001-2000 | 9 |
| 3 | 2001-3000 | 2 |
| 4 | 3001-4000 | 2 |
| 5 | 4001-5000 | 0 |
| 6 | 5001-6000 | 1 |
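As a rough sketch of the steps above in Python (pandas and numpy are assumed to be available, and the variable names `totals`, `bins` and `freq` are just for this illustration), the same frequency distribution can be computed from the Total column:
```python
import numpy as np
import pandas as pd

# the Total column from the province table above
totals = pd.Series([606, 5626, 2618, 260, 3991, 1173, 3319, 1100, 1011, 428,
                    1804, 1331, 1184, 446, 491, 383, 311, 404, 1384, 960,
                    274, 506, 1263, 2256, 391, 398, 188, 1234, 257, 130,
                    222, 198, 203, 69])

k = 1 + 3.3 * np.log10(len(totals))              # number of class intervals, about 6
bins = [0, 1000, 2000, 3000, 4000, 5000, 6000]   # class boundaries 1-1000, 1001-2000, ...
freq = pd.cut(totals, bins=bins).value_counts().sort_index()
print(round(k, 2))   # ~6.05
print(freq)          # 20, 9, 2, 2, 0, 1 as in the table above
```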
**Graphics**
Besides tables, data can also be presented graphically, that is, in the form of pictures. There are two kinds of graphical presentation: graphs and charts.
**Graphs** - used to present data that shows the relationship between two variables, for example the relationship between a quantity and time.
**Charts** - used to show comparisons of data across different groups, usually presented as percentage comparisons.
Line Graphs
Graph of the Number of School Dropouts per Province, 2016-2017 and 2017-2018
https://ibb.co/hVB8g0Z
Graph of the Number of School Dropouts by Gender and Province
https://ibb.co/HNm4R4L
Bar Charts
Bar Chart of the Number of School Dropouts per Province, 2016-2017 and 2017-2018
https://ibb.co/mTr3gHq
Bar Chart of the Number of School Dropouts by Gender and Province
https://ibb.co/PMcwK7K
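As a hedged sketch of how a bar chart like the ones linked above could be drawn with matplotlib (the subset of provinces and the variable names are chosen only for illustration):
```python
import matplotlib.pyplot as plt

# a few provinces from the table above, male vs. female dropouts
provinces = ['DKI Jakarta', 'Jawa Barat', 'Jawa Tengah', 'DI Yogyakarta', 'Jawa Timur']
male = [361, 2859, 1369, 138, 2160]
female = [245, 2767, 1249, 122, 1831]

x = range(len(provinces))
width = 0.4
plt.bar([i - width / 2 for i in x], male, width=width, label='Male')
plt.bar([i + width / 2 for i in x], female, width=width, label='Female')
plt.xticks(list(x), provinces, rotation=30)
plt.ylabel('Number of dropouts')
plt.title('School dropouts by gender (selected provinces)')
plt.legend()
plt.show()
```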
# Contextual Bandits with Continuous Actions
In this tutorial we will simulate the scenario of personalizing a thermostat for a household with two rooms using Contextual Bandits in a continuous action space. The goal is to maximize user satisfaction with the thermostat quantified by measuring thermostat accuracy or reward (TR). The thermostat proposes a temperature and the user will either accept the temperature or adjust it to fit their needs.
Let's recall that in a CB setting, a data point has four components,
- Context
- Chosen Action
- Probability of chosen action
- Reward/cost for chosen action
In our simulator we will need to generate a context, get an action/decision for the given context, and also simulate generating a reward.
The goal of the learning agent is to maximize the reward or to minimize the loss.
The thermostat tracks two rooms: 'Living Room' and 'Bedroom'.
Each room will need temperature adjustment either in the morning or in the afternoon.
The context is therefore (room, time_of_day).
In a continuous range we can't enumerate the actions, since there are infinitely many possible actions across the range. We do, however, provide the minimum value and the maximum value of the range. Here the range will be 0 to 32 degrees Celsius (with 1 degree increments that would correspond to 33 discrete settings, but we treat it as a continuous range).
The reward is measured using the absolute difference between the proposed temperature and the one that was actually set by the people living in the house.
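To make this concrete, here is a purely illustrative sketch of what one logged interaction in this simulated world might look like (the dictionary fields are just for illustration and are not VW's input format, which is introduced later):
```python
# purely illustrative: one simulated interaction, before converting to VW's input format
example_interaction = {
    "context": {"room": "Living Room", "time_of_day": "morning"},
    "action": 16.5,      # proposed temperature (degrees Celsius) in [0, 32]
    "pdf_value": 0.23,   # density of the sampling pdf at the chosen temperature
    "cost": -1.0,        # -reward: -1.0 if the user accepted the temperature, 0.0 otherwise
}
```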
Let's first start with importing the necessary packages:
```
import matplotlib.pyplot as plt
from vowpalwabbit import pyvw
import random
import math
import json
# VW minimizes loss/cost, therefore we will pass cost as -reward
USER_LIKED_TEMPERATURE = -1.0
USER_DISLIKED_TEMPERATURE = 0.0
```
## Simulate reward
In the real world we will have to learn the room temperature preferences as we observe the interactions between the proposed temperature for each room and the one selected by the people living in the house. Since this is a simulation we will have to define the preference profile for each room. The reward that we provide to the learner will follow this preference profile. Our hope is to see if the learner can take better and better decisions as we see more samples which in turn means we are maximizing the reward.
We will also modify the reward function in a few different ways and see if the CB learner picks up the changes. We will compare the TR with and without learning.
VW minimizes the cost, which is defined as -reward. Therefore, we will pass the cost associated to each chosen action to VW.
The reward function below specifies that we want the living room to be cold in the morning but warm in the afternoon. Conversely, we prefer the bedroom to be warm in the morning and cold in the afternoon. It looks dense, but we are just simulating our hypothetical world in the format of the feedback the learner understands: cost. If the learner recommends a temperature that aligns with the reward function, we give a positive reward. Since VW learns in terms of cost (-reward), we return a cost of -1.0 for the best outcome and 0.0 for the worst. In our simulated world the criterion is the difference between the recommended temperature and the temperature the user actually chose: if the difference is smaller than 5 degrees, we reward the thermostat. This is a steep cost function.
```
def get_cost(context, temperature, min_value, max_value):
range = float(max_value - min_value)
if context['room'] == "Living Room":
if context['time_of_day'] == "morning":
# randomly pick a temperature in this range
selected_temperature = random.uniform(15, 18)
# the absolute difference between selected temperature and proposed temperature
if math.fabs(selected_temperature - temperature) < 5.0:
return USER_LIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
elif context['time_of_day'] == "afternoon":
selected_temperature = random.uniform(25, 29)
# the absolute difference between selected temperature and proposed temperature
if math.fabs(selected_temperature - temperature) < 5.0:
return USER_LIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
elif context['room'] == "Bedroom":
if context['time_of_day'] == "morning":
# randomly pick a temperature in this range
selected_temperature = random.uniform(22, 29)
# the absolute difference between selected temperature and proposed temperature
if math.fabs(selected_temperature - temperature) < 5.0:
return USER_LIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
elif context['time_of_day'] == "afternoon":
# randomly pick a temperature in this range
selected_temperature = random.uniform(15, 18)
# the absolute difference between selected temperature and proposed temperature
if math.fabs(selected_temperature - temperature) < 5.0:
return USER_LIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
# This function converts (context, temperature (i.e. action), cost, probability) into VW-friendly JSON format
def to_vw_example_format(context, cats_label=None):
example_dict = {}
if cats_label is not None:
chosen_temp, cost, pdf_value = cats_label
example_dict['_label_ca'] = {'action' : chosen_temp, 'cost': cost, 'pdf_value': pdf_value}
example_dict['c'] = {'room={}'.format(context['room']): 1, 'time_of_day={}'.format(context['time_of_day']) : 1}
return json.dumps(example_dict)
```
## Getting a decision
We call VW and get a predicted temperature and the value of the probability density function (pdf) at that temperature. Since we are predicting over a continuous range, VW will sample from a pdf before returning the predicted value and the density of the pdf at that point. We are incorporating exploration into our strategy, so the pdf will be denser around the value that VW chooses to predict and less dense over the rest of the continuous range. It is therefore more likely that VW will choose an action near the predicted value.
We have all of the information we need to choose a temperature for a specific room and time of day. To use VW to achieve this, we will do the following:
1. Convert our context into the JSON format we need
2. Pass this example to VW and get the chosen action and the probability of choosing that action
3. Return the chosen temperature and the probability of choosing it (we are going to need the probability when we learn from this example)
```
def predict_temperature(vw, context):
vw_text_example = to_vw_example_format(context)
return vw.predict(vw_text_example)
```
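Here is a quick usage sketch (illustrative only: `vw_demo` is a throwaway learner whose flags mirror the ones used in Scenario 1 below, and the helper functions defined above are reused):
```python
# Illustrative only: instantiate a CATS learner and ask it for a temperature
vw_demo = pyvw.vw("--cats 32 --bandwidth 1 --min_value 0 --max_value 32 "
                  "--json --chain_hash --coin --epsilon 0.2 -q :: --quiet")
temperature, pdf_value = predict_temperature(vw_demo, {'room': 'Bedroom', 'time_of_day': 'morning'})
print(temperature, pdf_value)  # a temperature in [0, 32] and the pdf density at that point
vw_demo.finish()
```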
## Simulation set up
Now that we have done all of the setup work and know how to interact with VW, let's simulate the world of our two rooms. The scenario is that the thermostat is turned on in each room and it has to propose a temperature. Remember that the reward function allows us to define the world's reaction to what VW recommends.
We will choose between 'Living Room' and 'Bedroom' uniformly at random and also choose the time of day uniformly at random. We can think of this as tossing a coin to choose between the rooms ('Living Room' if heads and 'Bedroom' if tails) and another coin toss for choosing time of day.
```
rooms = ['Living Room', 'Bedroom']
times_of_day = ['morning', 'afternoon']
def choose_room(rooms):
return random.choice(rooms)
def choose_time_of_day(times_of_day):
return random.choice(times_of_day)
```
We will instantiate a CB learner in VW and then simulate the thermostat interaction `num_iterations` times. In each interaction, we:
1. Decide between 'Living Room' and 'Bedroom'
2. Decide time of day
3. Pass context i.e. (room, time of day) to learner to get a temperature i.e. a value between min (0 degrees) and max (32 degrees) and probability of choosing that temperature
4. Receive reward i.e. see if the proposed temperature was adjusted or not, and by how much. Remember that cost is just negative reward.
5. Format context, action (temperature), probability, and reward into VW format
6. Learn from the example
The above steps are repeatedly executed during our simulations, so we define the process in the run_simulation function. The cost function must be supplied as this is essentially us simulating how the world works.
```
def run_simulation(vw, num_iterations, rooms, times_of_day, cost_function, min_value, max_value, do_learn=True):
reward_rate = []
hits = 0
cost_sum = 0.
for i in range(1, num_iterations + 1):
# 1. In each simulation choose a room
room = choose_room(rooms)
# 2. Choose time of day for a given room
time_of_day = choose_time_of_day(times_of_day)
# 3. Pass context to vw to get a temperature
context = {'room': room, 'time_of_day': time_of_day}
temperature, pdf_value = predict_temperature(vw, context)
# 4. Get cost of the action we chose
cost = cost_function(context, temperature, min_value, max_value)
if cost <= -0.75: # count something as a hit only if it has a high reward
hits += 1
cost_sum += cost
if do_learn:
# 5. Inform VW of what happened so we can learn from it
txt_ex = to_vw_example_format(context, cats_label=(temperature, cost, pdf_value))
vw_format = vw.parse(txt_ex, pyvw.vw.lContinuous)
# 6. Learn
vw.learn(vw_format)
# 7. Let VW know you're done with these objects
vw.finish_example(vw_format)
# We negate this so that on the plot instead of minimizing cost, we are maximizing reward
reward_rate.append(-1*cost_sum/i)
return reward_rate, hits
```
We want to be able to visualize what is occurring, so we are going to plot the reward rate over each iteration of the simulation. If VW is proposing temperatures that are close to what the simulated world wants, the reward rate will be higher. Below is a little utility function to make showing the plot easier.
```
def plot_reward_rate(num_iterations, reward_rate, title):
plt.show()
plt.plot(range(1, num_iterations + 1), reward_rate)
plt.xlabel('num_iterations', fontsize=14)
plt.ylabel('reward rate', fontsize=14)
plt.title(title)
plt.ylim([0,1])
```
## Scenario 1
We will use the first reward function get_cost, assume that the preferences for room temperatures do not change over time, and see what happens to the smart thermostat as it learns. We will also see what happens when there is no learning. We will use the "no learning" case as our baseline to compare to.
We will be using the CATS algorithm, which does tree-based learning with smoothing. That means that we need to provide the number of actions (buckets/tree leaves) that the continuous range will be discretized into, and then we need to define the bandwidth, which is the radius around the chosen discrete action from which the algorithm will sample a temperature with higher probability.
For example, in our current range of 32 degrees celsius, if we select the number of actions to be 8, the algorithm will initially predict an action from the centre of one of 8 buckets:
`(0 - 2 - 4), (4 - 6 - 8), (8 - 10 - 12), (12 - 14 - 16), (16 - 18 - 20), (20 - 22 - 24), (24 - 26 - 28), (28 - 30 - 32)`
Let's say that for a given context, it selects the third bucket that starts from 8 degrees celsius, goes until 12 degrees celsius, and has a center of 10 degrees celsius. For a smoothing radius (bandwidth) of 1 the resulting probability density function (pdf) that VW will have to sample from will have a higher density around
`[bucket_centre - bandwidth, bucket_centre + bandwidth]`
i.e. \[9, 11\]. If the bandwidth were bigger, for example 5, then we would have higher density (and therefore higher probability of selecting an action) in the range \[5, 15\], providing a smoothing range that spans the discretized buckets. The bandwidth is defined in terms of the continuous range (max_value - min_value).
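To make the arithmetic concrete, here is a small sketch in plain Python (not VW internals) of the bucket centres and the smoothing interval for the example above; the variable names are only for illustration:
```
# Bucket centres and smoothing interval for num_actions = 8, bandwidth = 1 over 0-32 degrees.
min_value, max_value, num_actions, bandwidth = 0, 32, 8, 1
bucket_width = (max_value - min_value) / num_actions            # 4 degrees per bucket
centres = [min_value + bucket_width * (i + 0.5) for i in range(num_actions)]
print(centres)                                                  # [2.0, 6.0, 10.0, ..., 30.0]
chosen_centre = centres[2]                                      # the 8-12 degree bucket
print(chosen_centre - bandwidth, chosen_centre + bandwidth)     # 9.0 11.0 -> higher sampling density here
```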
### With Learning
```
num_iterations = 5000
num_actions = 32
bandwidth = 1
# Instantiate VW learner
vw = pyvw.vw("--cats " + str(num_actions) + " --bandwidth " + str(bandwidth) + " --min_value 0 --max_value 32 --json --chain_hash --coin --epsilon 0.2 -q :: --quiet")
ctr, hits = run_simulation(vw, num_iterations, rooms, times_of_day, get_cost, 0, 32, do_learn=True)
vw.finish()
plot_reward_rate(num_iterations, ctr, 'reward rate with num_actions = 32 and bandwidth = 1')
```
### Without Learning
Let's do the same but without learning. The reward rate never improves and just hovers around 0.5.
```
num_iterations = 5000
num_actions = 32
bandwidth = 1
# Instantiate VW learner
vw = pyvw.vw("--cats " + str(num_actions) + " --bandwidth " + str(bandwidth) + " --min_value 0 --max_value 32 --json --chain_hash --coin --epsilon 0.2 -q :: --quiet")
ctr, hits = run_simulation(vw, num_iterations, rooms, times_of_day, get_cost, 0, 32, do_learn=False)
vw.finish()
plot_reward_rate(num_iterations, ctr, 'reward rate with num_actions = 32 and bandwidth = 1')
```
## Parameter sweep
Next let's do a parameter sweep for different values of `num_actions` and `bandwidth`. We will use the below function to help us plot the reward rates for different combinations of `num_actions` and `bandwidths`
```
def plot_reward_sweep(num_iterations, actions, bandwidths, data):
plt.show()
n_actions = len(actions)
n_bandwidths = len(bandwidths)
fig, axs = plt.subplots(n_actions, n_bandwidths)
for i in range(0, len(actions)):
for j in range(0, len(bandwidths)):
if bandwidths[j] >= actions[i]:
axs[i, j].set_title('NA')
continue
reward_rate, hits = data[str(actions[i])][str(bandwidths[j])]
hits_percentage = (hits/(num_iterations))*100
axs[i, j].plot(range(1, num_iterations + 1), reward_rate)
axs[i, j].set_title('hits {:.2f}% TR {:.2f}%'.format(hits_percentage, reward_rate[-1]*100))
axs[i, j].set_ylim([0,1])
for i, row in enumerate(axs):
for j, ax in enumerate(row):
ax.set_xlabel('b: ' + str(bandwidths[j%len(bandwidths)]), fontsize=14)
ax.set_ylabel('k: ' + str(actions[i%len(actions)]), fontsize=14)
fig.text(0.5, 0.04, 'num_iterations', ha='center', fontsize=14)
fig.text(0.04, 0.5, 'reward_rate', va='center', rotation='vertical', fontsize=14)
fig.set_figheight(18)
fig.set_figwidth(30)
plt.suptitle('#examples {}'.format(num_iterations))
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
ax.label_outer()
```
### With Learning
We will try numbers of actions that are powers of 2, from 8 up to 2048. Since our continuous range stays the same (0-32) we are creating smaller range buckets as the number of actions grows. The number of actions needs to be a power of 2 as it represents the number of leaves that the internal binary tree will have. A small number of actions can result in a discretization so coarse that the results resemble uniform random, while a really large number of actions could mean that we need a lot more data in order to train all of the buckets.
We will also try all the combinations of the above action numbers with bandwidths ranging from 0 to 25. The smaller the bandwidth, the smaller the smoothing range around the selected continuous value. Really large bandwidths will result in large smoothing ranges and could lead to results similar to uniform random.
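As a point of reference for the sweep below, the bucket width implied by each setting is simply the 32-degree range divided by the number of actions, so the discretization quickly becomes very fine:
```
# Bucket width (in degrees) for each num_actions value used in the sweep below.
for k in [8, 32, 64, 128, 256, 512, 1024, 2048]:
    print(k, 32 / k)   # 8 -> 4.0, 32 -> 1.0, 128 -> 0.25, ..., 2048 -> 0.015625
```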
```
# do parameter sweeping
data = {}
num_actions = [8, 32, 64, 128, 256, 512, 1024, 2048]
bandwidths = [0, 1, 2, 3, 25]
num_iterations = 5000
for actions in num_actions:
for bd in bandwidths:
if str(actions) not in data:
data[str(actions)] = {}
if bd >= actions:
continue
print("Starting simulation for: --cats {} --bandwidth {} --min_value 0 --max_value 32 --json --chain_hash --coin --epsilon 0.2 -q :: --quiet".format(actions, bd))
vw = pyvw.vw("--cats " + str(actions) + " --bandwidth " + str(bd) + " --min_value 0 --max_value 32 --json --chain_hash --coin --epsilon 0.2 -q :: --quiet")
rr, hits = run_simulation(vw, num_iterations, rooms, times_of_day, get_cost, 0, 32, do_learn=True)
vw.finish()
print("Done with simulation for num_actions: {} and bandwidth: {}".format(actions, bd))
print()
data[str(actions)][str(bd)] = (rr, hits)
print("Plotting...")
plot_reward_sweep(num_iterations, num_actions, bandwidths, data)
```
### Without Learning
```
# do parameter sweeping
data = {}
num_actions = [8, 32, 64, 128, 256, 512, 1024, 2048]
bandwidths = [0, 1, 2, 3, 25]
num_iterations = 5000
for actions in num_actions:
for bd in bandwidths:
if str(actions) not in data:
data[str(actions)] = {}
if bd >= actions:
continue
print("Starting simulation for: --cats {} --bandwidth {} --min_value 0 --max_value 32 --json --chain_hash --coin --epsilon 0.2 -q :: --quiet".format(actions, bd))
vw = pyvw.vw("--cats " + str(actions) + " --bandwidth " + str(bd) + " --min_value 0 --max_value 32 --json --chain_hash --coin --epsilon 0.2 -q :: --quiet")
rr, hits = run_simulation(vw, num_iterations, rooms, times_of_day, get_cost, 0, 32, do_learn=False)
vw.finish()
print("Done with simulation for num_actions: {} and bandwidth: {}".format(actions, bd))
print()
data[str(actions)][str(bd)] = (rr, hits)
print("Plotting...")
plot_reward_sweep(num_iterations, num_actions, bandwidths, data)
```
## Scenario 2
In the real world people's preferences change as, for example, the seasons change. So now in the simulation we are going to incorporate two different cost functions, and swap over to the second one halfway through. Below is a table of the new cost function we are going to use, get_cost_1:
### Living Room
| | get_cost | get_cost_1 |
|:---|:---:|:---:|
| **Morning** | Cold | Hot |
| **Afternoon** | Hot | Cold |
### Bedroom
| | get_cost | get_cost_1 |
|:---|:---:|:---:|
| **Morning** | Hot | Cold |
| **Afternoon** | Cold | Cold |
Below we define the new cost function
```
def get_cost_1(context, temperature, min_value, max_value):
range = float(max_value - min_value)
if context['room'] == "Living Room":
if context['time_of_day'] == "morning":
# randomly pick a temperature in this range
selected_temperature = random.uniform(25, 29)
# the absolute difference between selected temperature and proposed temperature
if math.fabs(selected_temperature - temperature) < 5.0:
return USER_LIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
elif context['time_of_day'] == "afternoon":
selected_temperature = random.uniform(15, 18)
# the absolute difference between selected temperature and proposed temperature
if math.fabs(selected_temperature - temperature) < 5.0:
return USER_LIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
elif context['room'] == "Bedroom":
if context['time_of_day'] == "morning":
# randomly pick a temperature in this range
selected_temperature = random.uniform(15, 18)
# the absolute difference between selected temperature and proposed temperature
if math.fabs(selected_temperature - temperature) < 5.0:
return USER_LIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
elif context['time_of_day'] == "afternoon":
# randomly pick a temperature in this range
selected_temperature = random.uniform(15, 18)
# the absolute difference between selected temperature and proposed temperature
if math.fabs(selected_temperature - temperature) < 5.0:
return USER_LIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
```
To make it easy to show the effect of the cost function changing we are going to modify the run_simulation function. It is a little less readable now, but it supports accepting a list of cost functions and it will operate over each cost function in turn.
```
def run_simulation_multiple_cost_functions(vw, num_iterations, rooms, times_of_day, cost_functions, min_value, max_value, do_learn=True):
reward_rate = []
hits = 0
cost_sum = 0.
start_counter = 1
end_counter = start_counter + num_iterations
for cost_function in cost_functions:
for i in range(start_counter, end_counter):
# 1. In each simulation choose a room
room = choose_room(rooms)
# 2. Choose time of day for a given room
time_of_day = choose_time_of_day(times_of_day)
# 3. Pass context to vw to get a temperature
context = {'room': room, 'time_of_day': time_of_day}
temperature, pdf_value = predict_temperature(vw, context)
# 4. Get cost of the action we chose
cost = cost_function(context, temperature, min_value, max_value)
if cost <= -0.75: # count something as a hit only if it has a high reward
hits += 1
cost_sum += cost
if do_learn:
# 5. Inform VW of what happened so we can learn from it
txt_ex = to_vw_example_format(context, cats_label=(temperature, cost, pdf_value))
vw_format = vw.parse(txt_ex, pyvw.vw.lContinuous)
# 6. Learn
vw.learn(vw_format)
# 7. Let VW know you're done with these objects
vw.finish_example(vw_format)
# We negate this so that on the plot instead of minimizing cost, we are maximizing reward
reward_rate.append(-1*cost_sum/i)
start_counter = end_counter
end_counter = start_counter + num_iterations
return reward_rate, hits
```
### With Learning
Now that we have run a parameter sweep we can better pick the values of num_actions and bandwidth. For the next scenario we will pick `num_actions 128` and `bandwidth 2`.
Let us now switch to the second cost function after a few samples (running the first cost function). Recall that this cost function changes the preferences of the room temperatures but it is still working with the same continuous action space as before. We should see the learner pick up these changes and optimize towards the new preferences.
```
# use first reward function initially and then switch to second reward function
# Instantiate learner in VW
num_actions = 128
bandwidth = 2
# Instantiate VW learner
vw = pyvw.vw("--cats " + str(num_actions) + " --bandwidth " + str(bandwidth) + " --min_value 0 --max_value 32 --json --chain_hash --coin --epsilon 0.2 -q :: --quiet")
num_iterations_per_cost_func = 5000
cost_functions = [get_cost, get_cost_1]
total_iterations = num_iterations_per_cost_func * len(cost_functions)
ctr, hits = run_simulation_multiple_cost_functions(vw, num_iterations_per_cost_func, rooms, times_of_day, cost_functions, 0, 32, do_learn=True)
vw.finish()
plot_reward_rate(total_iterations, ctr, 'reward rate with num_actions = 128 and bandwidth = 2')
```
### Without Learning
```
# use first reward function initially and then switch to second reward function
# Instantiate learner in VW
num_actions = 128
bandwidth = 2
# Instantiate VW learner
vw = pyvw.vw("--cats " + str(num_actions) + " --bandwidth " + str(bandwidth) + " --min_value 0 --max_value 32 --json --chain_hash --coin --epsilon 0.2 -q :: --quiet")
num_iterations_per_cost_func = 5000
cost_functions = [get_cost, get_cost_1]
total_iterations = num_iterations_per_cost_func * len(cost_functions)
ctr, hits = run_simulation_multiple_cost_functions(vw, num_iterations_per_cost_func, rooms, times_of_day, cost_functions, 0, 32, do_learn=False)
vw.finish()
plot_reward_rate(total_iterations, ctr, 'reward rate with num_actions = 128 and bandwidth = 2')
```
## Scenario 3
### Better cost function
The cost function we have been using until now has been a bit too simplistic, but it has served us well enough to showcase the difference that learning makes and to show CB picking up the new cost function and adjusting to it.
A slightly better cost function for our simulated world could be based on the difference between the recommended temperature and the temperature the occupant actually wanted. The smaller the difference, the better the thermostat is doing. We are going to model that by taking the cost `1.0 - |selected_temperature - predicted_temperature| / range` and then transforming that cost into a reward by multiplying it with `-1`.
```
def get_smooth_cost(context, temperature, min_value, max_value):
range = float(max_value - min_value)
if context['room'] == "Living Room":
if context['time_of_day'] == "morning":
# randomly pick a temperature in this range
selected_temperature = random.uniform(25, 29)
# the absolute difference between selected temperature and proposed temperature
cost = 1.0 - math.fabs(selected_temperature - temperature) / range
return -1.0 * cost
elif context['time_of_day'] == "afternoon":
selected_temperature = random.uniform(15, 18)
# the absolute difference between selected temperature and proposed temperature
cost = 1.0 - math.fabs(selected_temperature - temperature) / range
return -1.0 * cost
else:
return USER_DISLIKED_TEMPERATURE
elif context['room'] == "Bedroom":
if context['time_of_day'] == "morning":
# randomly pick a temperature in this range
selected_temperature = random.uniform(15, 18)
# the absolute difference between selected temperature and proposed temperature
cost = 1.0 - math.fabs(selected_temperature - temperature) / range
return -1.0 * cost
elif context['time_of_day'] == "afternoon":
# randomly pick a temperature in this range
selected_temperature = random.uniform(15, 18)
# the absolute difference between selected temperature and proposed temperature
cost = 1.0 - math.fabs(selected_temperature - temperature) / range
return -1.0 * cost
else:
return USER_DISLIKED_TEMPERATURE
else:
return USER_DISLIKED_TEMPERATURE
```
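As a quick sanity check of the shape of this reward (illustrative numbers only), calling the function for the living room in the morning with a proposed temperature of 20 degrees should return a cost between roughly -0.84 and -0.72, since the preferred temperature is drawn from \[25, 29\]:
```
# Illustrative call: the preferred temperature is drawn from [25, 29], so the absolute
# difference to 20 degrees is 5-9 degrees and the returned cost is about -0.72 to -0.84.
print(get_smooth_cost({'room': 'Living Room', 'time_of_day': 'morning'}, 20.0, 0, 32))
```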
Let's try the original parameter sweep with the new cost function `get_smooth_cost`.
```
# do parameter sweeping
data = {}
num_actions = [8, 32, 64, 128, 256, 512, 1024, 2048]
bandwidths = [0, 1, 2, 3, 25]
num_iterations = 5000
for actions in num_actions:
for bd in bandwidths:
if str(actions) not in data:
data[str(actions)] = {}
if bd >= actions:
continue
print("Starting simulation for: --cats {} --bandwidth {} --min_value 0 --max_value 32 --json --chain_hash --coin --epsilon 0.2 -q :: --quiet".format(actions, bd))
vw = pyvw.vw("--cats " + str(actions) + " --bandwidth " + str(bd) + " --min_value 0 --max_value 32 --json --chain_hash --coin --epsilon 0.2 -q :: --quiet")
rr, hits = run_simulation(vw, num_iterations, rooms, times_of_day, get_smooth_cost, 0, 32, do_learn=True)
vw.finish()
print("Done with simulation for num_actions: {} and bandwidth: {}".format(actions, bd))
print()
data[str(actions)][str(bd)] = (rr, hits)
print("Plotting...")
plot_reward_sweep(num_iterations, num_actions, bandwidths, data)
```
# Sequential Monte Carlo with two gaussians
```
%matplotlib inline
import pymc3 as pm
import numpy as np
import matplotlib.pyplot as plt
from pymc3.step_methods import smc
import theano.tensor as tt
from tempfile import mkdtemp
import shutil
test_folder = mkdtemp(prefix='SMC_TEST')
plt.style.use('seaborn-darkgrid')
print('Running on PyMC3 v{}'.format(pm.__version__))
```
Sampling from $n$-dimensional distributions with multiple peaks with a standard Metropolis-Hastings algorithm can be difficult, if not impossible, as the Markov chain often gets stuck in one of the modes.
This problem can be avoided by running many (`n_chains`) Markov chains in parallel for (`n_steps`) steps. To speed this process up we do not sample right away from the posterior distribution, but rather from a sequence of intermediate distributions, each of which is similar to the previous one. Once the sampling for all the chains is finished, the algorithm enters a 'transitional stage'.
In this stage the similarity between the intermediate distributions is evaluated by a tempering parameter (`beta`), which is automatically determined from the sampling results (coefficient of variation - COV) from the previous intermediate distribution. If the COV is high the cooling is slow, resulting in small steps in `beta` and vice versa.
Also based on the parameter distributions the `MultivariateProposal` is updated and new seed points for the following Markov chains are determined. The end points of the Markov chains with the highest likelihoods are chosen as new seed-points for the Markov chains of the next sampling stage.
So the sampling of the intermediate distribution is repeated until `beta` > 1, which means that the posterior distribution is reached.
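To make the role of the COV a little more concrete, below is a small self-contained sketch of how such a tempering step could be chosen. It only illustrates the idea (a bigger spread in the likelihoods forces a smaller step in `beta`); it is not the actual pymc3 implementation:
```
import numpy as np

def next_beta(log_likes, beta, target_cov=1.0, n_grid=1000):
    # Walk from the largest possible step (straight to beta = 1) down to zero and take
    # the first step whose importance weights have a coefficient of variation <= target.
    for delta in np.linspace(1.0 - beta, 0.0, n_grid):
        w = np.exp(delta * (log_likes - log_likes.max()))   # numerically stabilised weights
        if w.std() / w.mean() <= target_cov:
            return min(beta + delta, 1.0)
    return 1.0

log_likes = np.random.normal(-50.0, 5.0, size=500)           # fake per-chain likelihoods
print(next_beta(log_likes, beta=0.0))
```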
```
import pymc3 as pm
import numpy as np
from pymc3.step_methods import smc
import theano.tensor as tt
from matplotlib import pyplot as plt
from tempfile import mkdtemp
import shutil
%matplotlib inline
test_folder = mkdtemp(prefix='ATMIP_TEST')
```
The number of Markov chains and the number of steps each Markov chain is sampling has to be defined, as well as the `tune_interval` and the number of processors to be used in the parallel sampling. In this very simple example using only one processor is faster than forking the interpreter. However, if the calculation cost of the model increases it becomes more efficient to use many processors.
```
n_chains = 500
n_steps = 100
tune_interval = 25
n_jobs = 1
```
Define the number of dimensions for the multivariate gaussians, their weights and the covariance matrix.
```
n = 4
mu1 = np.ones(n) * (1. / 2)
mu2 = -mu1
stdev = 0.1
sigma = np.power(stdev, 2) * np.eye(n)
isigma = np.linalg.inv(sigma)
dsigma = np.linalg.det(sigma)
w1 = 0.1
w2 = (1 - w1)
```
The PyMC3 model. Note that we are building a mixture of two gaussians, with weights `w1` (10%) and `w2` (90%) of the mass:
```
def two_gaussians(x):
log_like1 = - 0.5 * n * tt.log(2 * np.pi) \
- 0.5 * tt.log(dsigma) \
- 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1)
log_like2 = - 0.5 * n * tt.log(2 * np.pi) \
- 0.5 * tt.log(dsigma) \
- 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2)
return tt.log(w1 * tt.exp(log_like1) + w2 * tt.exp(log_like2))
with pm.Model() as ATMIP_test:
X = pm.Uniform('X',
shape=n,
lower=-2. * np.ones_like(mu1),
upper=2. * np.ones_like(mu1),
testval=-1. * np.ones_like(mu1),
transform=None)
like = pm.Deterministic('like', two_gaussians(X))
llk = pm.Potential('like', like)
```
Note: In contrast to other pymc3 samplers here we have to define a random variable `like` that contains the model likelihood. The likelihood has to be stored in the sampling traces along with the model parameter samples, in order to determine the coefficient of variation [COV] in each transition stage.
Now the sampler is initialised dependent on the previous specifications:
```
with ATMIP_test:
step = smc.SMC(
n_chains=n_chains, tune_interval=tune_interval,
likelihood_name=ATMIP_test.deterministics[0].name)
```
Finally, the sampling is executed:
```
mtrace = smc.ATMIP_sample(
n_steps=n_steps,
step=step,
n_jobs=n_jobs,
progressbar=False,
stage='0',
homepath=test_folder,
model=ATMIP_test,
rm_flag=True)
```
Note: Complex models run for a long time and might stop for some reason during the sampling. In order to restart the sampling in the stage when the sampler stopped, set the stage argument to the right stage number ("`stage='4'`"). The `rm_flag` determines whether existing results are deleted - there is NO additional warning, so the user should pay attention to that one!
Plotting the results using the traceplot:
```
_ = pm.traceplot(mtrace, combined=True)
```
Finally, we delete the sampling result folder. This folder may occupy significant disk space (gigabytes), depending on the number of sampling parameters for complex models. So we advise the user to check in advance if there is enough space on the disk.
```
shutil.rmtree(test_folder)
```
```
import json
from tqdm.notebook import tqdm
import tldextract
import re
import urllib.parse as urlparse
base_directory = 'khaleesi/'
# Replace * with HTTP or JS request chains file name below
json_chains_dir = base_directory + 'data/crawl-*-labeled.json'
features_dir = base_directory + 'features/*.csv'
with open(json_chains_dir) as f:
data = json.load(f)
ad_keywords = ["click", "measurement", "measure", "promoted", "pagead", "hit", "banner", "2mdn",\
"adsystem", "adsense", "ptracking", "beacon", "openx", "aralego", "usermatch",\
"appnexus", "popunder", "punder", "metrics", "tpid", "pixel", "idsync", "uuid",\
"uid", "advertising", "adsync", "dspid", "dpid", "dpuuid", "tracking", "ad", "delivery",\
"pid", "id_sync", "pxl", "1x1", "px", "pix", "analytics", "csync", "cksync", "adserver",\
"bidder", "ads", "adform", "advert", "iframe", "googlead", "advertise", "track", "prebid",\
"bid", "zoneid", "siteid", "pageid", "viewid", "zone_id", "google_afc" , "google_afs",\
"google_gid", "google_cver", "pix", "rtb", "ssp", "dsp", "dmt", "sync", "doubleclick",\
"match", "tid", "google_nid", "google_dbm", "google_cm", "google_sc"]
ad_keywords_plain = set(["pagead", "measure", "promote", "banner", "2mdn", "adsystem", "adsense",\
"beacon", "openx", "aralego", "usermatch", "metrics", "appnexus", "popunder",\
"punder", "tpid", "pixel", "uuid", "advertising", "dspid", "dpid", "dpuuid",\
"tracking", "adserver", "1x1", "analytics", "adform", "advert", "iframe",\
"googlead", "advertise", "track", "prebid", "zoneid", "siteid", "pageid",\
"viewid", "zone_id", "google_afc", "google_afs", "google_gid", "google_cver",\
"sync", "doubleclick", "match", "google_nid", "google_dbm", "google_cm", "google_sc"])
def keyword_in_url_test(word, url):
regexKeywordsLeft = re.compile(r'[^0-9a-zA-Z]+' + word)
regexKeywordsRight = re.compile(word + r'[^0-9a-zA-Z]')
if regexKeywordsLeft.search(url) or regexKeywordsRight.search(url):
return True
def has_uuid(url):
regexKeyword = re.compile(r'........-....-....-....-............')
if regexKeyword.search(url):
return True
def dimensions_in_url(url):
    regexKeyword = re.compile(r'\d{2,4}[xX]\d{2,4}')
if regexKeyword.search(url):
return True
out = open(features_dir, 'w')
out.write('identifier,length_of_url,request_method,response_status,etag_in_header,p3p_in_header,has_subdomains,subdomain_of_top_level_domain_check,resource_type,url_has_uuid,url_has_dimensions,response_sets_cookie,third_party_domain,num_non_alphanumeric_chars_in_query_string,top_domain_in_query_string,num_request_cookies,semi_colons_in_url,response_type,response_subtype,content_length,query_string_length,keyword_in_url_re,keyword_in_url,redirect_to_new_domain,length_of_chain,num_unique_domains,num_request_headers,num_response_headers,target\n')
pbar = tqdm(total=len(data), position=0, leave=True)
domains = {}
for key in data:
pbar.update(1)
top_url = data[key]['top_url']
    if top_url is None:
top_url = ''
top_url_extracted = tldextract.extract(top_url)
top_domain = top_url_extracted.domain + '.' + top_url_extracted.suffix
top_hostname = top_url_extracted.subdomain + '.' + top_url_extracted.domain + '.' + top_url_extracted.suffix
i = 0
while i < len(data[key]['content']):
identifier = key + '|' + str(data[key]['content'][i]['redirect_id'])
url = data[key]['content'][i]['url']
url_extracted = tldextract.extract(url)
domain = url_extracted.domain + '.' + url_extracted.suffix
hostname = url_extracted.subdomain + '.' + url_extracted.domain + '.' + url_extracted.suffix
request_headers = data[key]['content'][i]['request_headers']
resource_type = data[key]['content'][i]['resource_type']
if key in domains:
domains[key].add(domain)
else:
domains[key] = set([domain])
num_unique_domains = str(len(domains[key]))
length_of_chain = str(i + 1)
query_string = urlparse.urlparse(url).query
query_string_length = str(len(query_string))
num_non_alphanumeric_chars_in_query_string = '0'
if query_string_length != '0':
non_alphanumeric_pattern = r'[^0-9a-zA-Z]'
num_non_alphanumeric_chars_in_query_string = str(
len(re.findall(non_alphanumeric_pattern, query_string)))
subdomain_of_top_level_domain_check = '0'
if top_domain == domain and top_hostname != hostname:
subdomain_of_top_level_domain_check = '1'
top_domain_in_query_string = '0'
if top_domain in query_string:
top_domain_in_query_string = '1'
semi_colons_in_url = '0'
if ';' in query_string:
semi_colons_in_url = '1'
keyword_in_url_re = '0'
for keyword in ad_keywords:
if keyword_in_url_test(keyword, url.lower()) == True:
keyword_in_url_re = '1'
break
keyword_in_url = '0'
for keyword in ad_keywords_plain:
if keyword in url.lower():
keyword_in_url = '1'
break
url_has_uuid = '0'
if has_uuid(url) == True:
url_has_uuid = '1'
url_has_dimensions = '0'
if dimensions_in_url(url) == True:
url_has_dimensions = '1'
length_of_url = str(len(url))
has_subdomains = '1'
if url_extracted.subdomain == '' or url_extracted.subdomain == 'www':
has_subdomains = '0'
third_party_domain = '0'
if top_domain != domain:
third_party_domain = '1'
num_request_headers = str(len(request_headers))
request_method = data[key]['content'][i]['method']
num_request_cookies = '0'
for element in request_headers:
if element[0].lower() == 'cookie':
try:
num_request_cookies = str(element[1].count("; ") + 1)
except:
num_request_cookies = '1'
target = str(False)
if 'ground_truth' in data[key]['content'][i]:
target = str(data[key]['content'][i]['ground_truth'])
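        # Response-side features come from the *previous* request in the chain (the
        # response that triggered this redirect). The first request in a chain has no
        # previous response, so these features are left as '?' (missing).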
num_response_headers = etag_in_header = response_sets_cookie = p3p_in_header = content_length = response_type = response_subtype = response_status = redirect_to_new_domain = '?'
if i != 0:
previous_url = data[key]['content'][i-1]['url']
previous_url_extracted = tldextract.extract(previous_url)
previous_domain = previous_url_extracted.domain + \
'.' + previous_url_extracted.suffix
response_headers = data[key]['content'][i-1]['response_headers']
response_status = str(data[key]['content'][i-1]['response_status'])
num_response_headers = str(len(response_headers))
redirect_to_new_domain = '0'
if domain != previous_domain:
redirect_to_new_domain = '1'
response_type_field = ''
etag_in_header = response_sets_cookie = p3p_in_header = content_length = '0'
for element in response_headers:
if element[0].lower() == 'content-type':
response_type_field = element[1]
elif element[0].lower() == 'content-length':
content_length = str(element[1])
elif element[0].lower() == 'etag':
etag_in_header = '1'
elif element[0].lower() == 'p3p':
p3p_in_header = '1'
elif element[0].lower() == 'set-cookie':
response_sets_cookie = '1'
response_type = '0'
if 'application' in response_type_field:
response_type = '1'
elif 'audio' in response_type_field:
response_type = '2'
elif 'image' in response_type_field:
response_type = '3'
elif 'text' in response_type_field:
response_type = '4'
elif 'video' in response_type_field:
response_type = '5'
elif 'font' in response_type_field:
response_type = '6'
elif 'model' in response_type_field:
response_type = '7'
response_subtype = '0'
if 'html' in response_type_field:
response_subtype = '1'
elif 'css' in response_type_field:
response_subtype = '2'
elif 'javascript' in response_type_field:
response_subtype = '3'
elif 'gif' in response_type_field:
response_subtype = '4'
elif 'png' in response_type_field:
response_subtype = '5'
elif 'jpeg' in response_type_field:
response_subtype = '6'
elif 'plain' in response_type_field:
response_subtype = '7'
elif 'json' in response_type_field:
response_subtype = '8'
out.write(','.join([identifier, length_of_url, request_method, response_status, etag_in_header, p3p_in_header, has_subdomains, subdomain_of_top_level_domain_check, resource_type, url_has_uuid, url_has_dimensions, response_sets_cookie, third_party_domain, num_non_alphanumeric_chars_in_query_string, top_domain_in_query_string,
num_request_cookies, semi_colons_in_url, response_type, response_subtype, content_length, query_string_length, keyword_in_url_re, keyword_in_url, redirect_to_new_domain, length_of_chain, num_unique_domains, num_request_headers, num_response_headers, target]) + '\n')
i += 1
out.close()
```
```
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
# silence warnings
warnings.filterwarnings("ignore")
# setting some CONSTANTs
plt.style.use(['science'])
plt.rcParams.update({
"text.usetex": False,
"font.family": "serif", # specify font family here
"font.serif": ["Palatino"], # specify font here
"font.size":12})
FIGSIZE = (12, 8)
```
# Load Data
```
metrics = pd.read_csv("metrics.csv")
metrics
```
# Plotting Metrics
```
sns.set_style("ticks")
sns.set_context("poster")
custom_palette = sns.color_palette("husl", 5)
sns.despine(trim=True);
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif", # specify font family here
"font.serif": ["Palatino"], # specify font here
"font.size":12})
fig, axes = plt.subplots(1, 2, figsize=(28, 13), sharey=False)
for idx, val in enumerate(["Grocery and Gourmet Food", "Pet Supplies"]):
axes[idx].set_title(val)
sns.barplot(x="N",
y="Recall@N",
hue="Algorithm",
data=metrics[metrics["Category"] == val],
ci=None,
ax=axes[idx])
axes[1].set_ylabel("")
axes[1].legend(frameon=False, loc='lower center', bbox_to_anchor=(-.1, -.2), ncol=6)
axes[0].yaxis.labelpad = 20
axes[0].legend().set_visible(False)
# remove minor ticks on x-axis
axes[0].tick_params(axis='x',which='minor',bottom=False)
axes[1].tick_params(axis='x',which='minor',bottom=False)
plt.savefig("recall@n.png", dpi=300, transparent=False)
plt.show()
sns.set_style("ticks")
sns.set_context("poster")
custom_palette = sns.color_palette("husl", 5)
sns.despine(trim=True);
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif", # specify font family here
"font.serif": ["Palatino"], # specify font here
"font.size":12})
fig, axes = plt.subplots(1, 2, figsize=(28, 13), sharey=False)
for idx, val in enumerate(["Grocery and Gourmet Food", "Pet Supplies"]):
axes[idx].set_title(val)
sns.barplot(x="N",
y="Novelty@N",
hue="Algorithm",
data=metrics[metrics["Category"] == val],
ci=None,
ax=axes[idx])
axes[1].set_ylabel("")
axes[1].legend(frameon=False, loc='lower center', bbox_to_anchor=(-.1, -.2), ncol=6)
axes[0].set_ylim(0.7, 1)
axes[1].set_ylim(0.7, 1)
axes[0].yaxis.labelpad = 20
axes[0].legend().set_visible(False)
# remove minor ticks on x-axis
axes[0].tick_params(axis='x',which='minor',bottom=False)
axes[1].tick_params(axis='x',which='minor',bottom=False)
plt.savefig("novelty@n.png", dpi=300, transparent=False)
plt.show()
```
# Cold-Start Users Plot
```
metrics_cold_start = pd.read_csv("metrics_cold_start.csv")
metrics_cold_start.head()
sns.set_style("ticks")
sns.set_context("poster")
custom_palette = sns.color_palette("husl", 5)
sns.despine(trim=True);
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif", # specify font family here
"font.serif": ["Palatino"], # specify font here
"font.size":12})
fig, axes = plt.subplots(1, 2, figsize=(28, 13), sharey=False)
# tracking markers
markers = ['s', 'p', '*', 'h', 'H', 'D']
for idx, val in enumerate(["Grocery and Gourmet Food", "Pet Supplies"]):
axes[idx].set_title(val)
sns.lineplot(x="N",
y="Recall@N",
hue="Algorithm",
data=metrics_cold_start[metrics_cold_start["Category"] == val],
ci=None,
ax=axes[idx],
style="Algorithm",
markers=markers,
markersize=15)
axes[1].set_ylabel("")
axes[1].legend(frameon=False, loc='lower center', bbox_to_anchor=(-.1, -.2), ncol=6)
axes[0].yaxis.labelpad = 20
axes[0].legend().set_visible(False)
axes[0].set_ylim(bottom=0)
axes[1].set_ylim(bottom=0)
# set ticks and remove minor ticks on x-axis
axes[0].set_xticks([5, 10, 15, 20])
axes[1].set_xticks([5, 10, 15, 20])
axes[0].tick_params(axis='x', which='minor', bottom=False)
axes[1].tick_params(axis='x', which='minor', bottom=False)
plt.savefig("cold_start_recall@n.png", dpi=300, transparent=False)
plt.show()
```
# Plotting Popularity Distribution
```
ps_train = pd.read_csv("../../data/processed/Pet_Supplies_processed.csv")
ggf_train = pd.read_csv("../../data/processed/Grocery_and_Gourmet_Food_processed.csv")
item_review_counts_ps = ps_train.groupby(["asin"]).agg({"processedReviewText": "count"})
item_review_counts_ggf = ggf_train.groupby(["asin"]).agg({"processedReviewText": "count"})
# sort by popularity (no. of reviews)
item_review_counts_ps = item_review_counts_ps.sort_values(by="processedReviewText", ascending=False)
item_review_counts_ggf = item_review_counts_ggf.sort_values(by="processedReviewText", ascending=False)
sns.set_style("ticks")
sns.set_context("poster")
custom_palette = sns.color_palette("husl", 5)
sns.despine(trim=True);
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif", # specify font family here
"font.serif": ["Palatino"], # specify font here
"font.size":12})
fig, axes = plt.subplots(1, 2, figsize=(28, 13), sharey=False)
for idx, val in enumerate(zip(["(a) Grocery and Gourmet Food", "(b) Pet Supplies"], [item_review_counts_ggf, item_review_counts_ps])):
axes[idx].set_title(val[0])
val[1].plot(ax=axes[idx])
axes[1].set_ylabel("")
# axes[1].legend(frameon=False, loc='lower center', bbox_to_anchor=(-.1, -.2), ncol=6)
axes[0].set_xticks([])
axes[1].set_xticks([])
axes[0].yaxis.labelpad = 20
axes[0].legend().set_visible(False)
axes[1].legend().set_visible(False)
axes[0].set_ylabel("Popularity (No. of reviews)")
axes[0].set_ylim(bottom=0)
axes[1].set_ylim(bottom=0)
# axes[0].axhline(y=16)
# axes[1].axhline(y=25)
plt.savefig("long-tail.png", dpi=300, transparent=False)
plt.show()
print(item_review_counts_ggf.quantile(.8), item_review_counts_ps.quantile(.8), sep="\n")
```
```
import random
shapes = ('Hearts','Diamonds','Spades','Clubs')
#copied from pierian data
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
values = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8,
'Nine':9, 'Ten':10, 'Jack':11, 'Queen':12, 'King':13, 'Ace':14}
# first we need to create CLASS CARD as per data
class cards:
def __init__(self,shape,rank):
self.rank = rank
self.shape = shape
self.value = values[rank]
def __str__(self):
return self.rank + " Of " + self.shape
shapes[0]
ranks[5]
values['Three']
values['Two']
values['Seven']
print(values)
Gamecard1 = cards('Hearts','Seven')
print(Gamecard1)
Gamecard1 = cards('Hearts','Seven')
gamecard2 = cards(shapes[2],ranks[10])
gamecard2.rank
gamecard2.shape
gamecard2.value
print(gamecard2)
class deck_card:
def __init__(self):
self.all_card_list=[]
for shape in shapes:
for rank in ranks:
self.all_card_list.append(cards(shape,rank))
def shuffle_mode(self):
random.shuffle(self.all_card_list)
def Taking_onecard(self):
return self.all_card_list.pop()
d = deck_card()
len(d.all_card_list)
d.all_card_list
print(d)
d.shuffle_mode
print(d.all_card_list[0])
d.all_card_list
d.shuffle_mode()
d.all_card_list
print(d.all_card_list[0])
print(d.all_card_list[51])
taken1 = d.Taking_onecard()
print(taken1)
len(d.all_card_list)
taken2 = d.Taking_onecard()
print(taken2)
len(d.all_card_list)
class player_turn():
def __init__(self,name):
self.name = name
self.all_cards_list = []
def player_took1(self):
return self.all_cards_list.pop(0)
def add_playercards(self,new_cards):
if type(new_cards) == type([]):
self.all_cards_list.extend(new_cards)
else:
self.all_cards_list.append(new_cards)
def __str__(self):
return f'Player {self.name} has {len(self.all_cards_list)} cards '
p = player_turn('Govind')
print(p)
p.add_playercards(taken1)
print(p)
print(taken1)
p.add_playercards(taken2)
print(p)
# GAME LOGIC
class player_turn():
def __init__(self):
self.name = input()
self.all_cards_list = []
def player_took1(self):
return self.all_cards_list.pop(0)
def add_playercards(self,new_cards):
if type(new_cards) == type([]):
self.all_cards_list.extend(new_cards)
else:
self.all_cards_list.append(new_cards)
def __str__(self):
return f'Player {self.name} has {len(self.all_cards_list)} cards '
player_one =player_turn()
print(player_one)
player_two = player_turn()
print(player_two)
# Shuffle
new_deck = deck_card()
new_deck.shuffle_mode()
len(new_deck.all_card_list)
len(new_deck.all_card_list)/2
for card in range(26):
player_one.add_playercards(new_deck.Taking_onecard())
player_two.add_playercards(new_deck.Taking_onecard())
len (player_one.all_cards_list)
len (player_two.all_cards_list)
# GAME ON
player_one.name
game_on = True
round_num = 0
while game_on:
round_num += 1
print(f'Round {round_num} Begins!!!!!')
if len(player_one.all_cards_list) ==0:
print(f'Player {player_one.name}, Out of card!!\n')
print(f'Congrats!,Player {player_two.name} Has won the game...!')
game_on = False
break
if len(player_two.all_cards_list) ==0:
print(f'Player {player_two.name}, Out of card!!\n')
print(f'Congrats!,Player {player_one.name} Has won the game...!')
game_on = False
break
player_one_cards=[]
player_one_cards.append(player_one.player_took1())
player_two_cards=[]
player_two_cards.append(player_two.player_took1())
at_war = True
while at_war:
if player_one_cards[-1].value > player_two_cards[-1].value:
player_one.add_playercards(player_one_cards)
player_one.add_playercards(player_two_cards)
at_war =False
elif player_one_cards[-1].value < player_two_cards[-1].value:
player_two.add_playercards(player_one_cards)
player_two.add_playercards(player_two_cards)
at_war =False
else:
print("WE ARE AT WAR!!!!!")
if len(player_one.all_cards_list) < 5:
print(f"Player {player_one.name} unable to declare war ")
print(f"PLAYER {player_two.name} WINS!!!!!")
game_on=False
break
elif len(player_two.all_cards_list) < 5:
print(f"Player {player_two.name} unable to declare war ")
print(f"PLAYER {player_one.name} WINS!!!!!")
game_on=False
break
else:
for num in range(5):
player_one_cards.append(player_one.player_took1())
player_two_cards.append(player_two.player_took1())
import random
suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
values = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':10,
'Queen':10, 'King':10, 'Ace':11}
playing = True
class Card:
def __init__(self,suit,rank):
self.suit = suit
self.rank = rank
def __str__(self):
return self.rank + ' of ' + self.suit
class Deck:
def __init__(self):
self.deck = [] # start with an empty list
for suit in suits:
for rank in ranks:
self.deck.append(Card(suit,rank)) # build Card objects and add them to the list
def __str__(self):
deck_comp = '' # start with an empty string
for card in self.deck:
deck_comp += '\n '+card.__str__() # add each Card object's print string
return 'The deck has:' + deck_comp
def shuffle(self):
random.shuffle(self.deck)
def deal(self):
single_card = self.deck.pop()
return single_card
class Hand:
def __init__(self):
self.cards = [] # start with an empty list as we did in the Deck class
self.value = 0 # start with zero value
self.aces = 0 # add an attribute to keep track of aces
def add_card(self,card):
self.cards.append(card)
self.value += values[card.rank]
if card.rank == 'Ace':
self.aces += 1 # add to self.aces
def adjust_for_ace(self):
while self.value > 21 and self.aces:
self.value -= 10
self.aces -= 1
class Chips:
def __init__(self):
self.total = 100 ### This can be set to a default value or supplied by a user input
self.bet = 0
def win_bet(self):
self.total += self.bet
def lose_bet(self):
self.total -= self.bet
c=Chips()
c.total  # e.g. inspect the starting chip total
def take_bet(chips):
while True:
try:
chips.bet = int(input('How many chips would you like to bet? '))
except ValueError:
print('Sorry, a bet must be an integer!')
else:
if chips.bet > chips.total:
print("Sorry, your bet can't exceed",chips.total)
else:
break
def hit(deck,hand):
hand.add_card(deck.deal())
hand.adjust_for_ace()
def hit_or_stand(deck,hand):
global playing # to control an upcoming while loop
while True:
x = input("Would you like to Hit or Stand? Enter 'h' or 's' ")
if x[0].lower() == 'h':
hit(deck,hand) # hit() function defined above
elif x[0].lower() == 's':
print("Player stands. Dealer is playing.")
playing = False
else:
print("Sorry, please try again.")
continue
break
def show_some(player,dealer):
print("\nDealer's Hand:")
print(" <card hidden>")
print('',dealer.cards[1])
print("\nPlayer's Hand:", *player.cards, sep='\n ')
def show_all(player,dealer):
print("\nDealer's Hand:", *dealer.cards, sep='\n ')
print("Dealer's Hand =",dealer.value)
print("\nPlayer's Hand:", *player.cards, sep='\n ')
print("Player's Hand =",player.value)
def player_busts(player,dealer,chips):
print("Player busts!")
chips.lose_bet()
def player_wins(player,dealer,chips):
print("Player wins!")
chips.win_bet()
def dealer_busts(player,dealer,chips):
print("Dealer busts!")
chips.win_bet()
def dealer_wins(player,dealer,chips):
print("Dealer wins!")
chips.lose_bet()
def push(player,dealer):
print("Dealer and Player tie! It's a push.")
while True:
# Print an opening statement
print('Welcome to BlackJack! Get as close to 21 as you can without going over!\n\
Dealer hits until she reaches 17. Aces count as 1 or 11.')
# Create & shuffle the deck, deal two cards to each player
deck = Deck()
deck.shuffle()
player_hand = Hand()
player_hand.add_card(deck.deal())
player_hand.add_card(deck.deal())
dealer_hand = Hand()
dealer_hand.add_card(deck.deal())
dealer_hand.add_card(deck.deal())
# Set up the Player's chips
player_chips = Chips() # remember the default value is 100
# Prompt the Player for their bet
take_bet(player_chips)
# Show cards (but keep one dealer card hidden)
show_some(player_hand,dealer_hand)
while playing: # recall this variable from our hit_or_stand function
# Prompt for Player to Hit or Stand
hit_or_stand(deck,player_hand)
# Show cards (but keep one dealer card hidden)
show_some(player_hand,dealer_hand)
# If player's hand exceeds 21, run player_busts() and break out of loop
if player_hand.value > 21:
player_busts(player_hand,dealer_hand,player_chips)
break
# If Player hasn't busted, play Dealer's hand until Dealer reaches 17
if player_hand.value <= 21:
while dealer_hand.value < 17:
hit(deck,dealer_hand)
# Show all cards
show_all(player_hand,dealer_hand)
# Run different winning scenarios
if dealer_hand.value > 21:
dealer_busts(player_hand,dealer_hand,player_chips)
elif dealer_hand.value > player_hand.value:
dealer_wins(player_hand,dealer_hand,player_chips)
elif dealer_hand.value < player_hand.value:
player_wins(player_hand,dealer_hand,player_chips)
else:
push(player_hand,dealer_hand)
# Inform Player of their chips total
print("\nPlayer's winnings stand at",player_chips.total)
# Ask to play again
new_game = input("Would you like to play another hand? Enter 'y' or 'n' ")
if new_game[0].lower()=='y':
playing=True
continue
else:
print("Thanks for playing!")
break
```
## Observations and Insights
- Mouse subjects were split fairly evenly between male and female for the overall experiment.
- Two of the drug regimens decreased tumor volume significantly relative to the placebo trials: Ramicane and Capomulin.
- Incidentally, Ramicane and Capomulin were also the two drugs with the most overall data points studied.
## Dependencies and starter code
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
# Study data files
mouse_metadata = "data/Mouse_metadata.csv"
study_results = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata)
study_results = pd.read_csv(study_results)
# mouse_metadata.head()
# study_results.head()
# Combine the data into a single dataset
full_data = pd.merge(mouse_metadata, study_results, on="Mouse ID")
full_data
```
## Summary statistics
```
# Generate a summary statistics table of the tumor volume for each regimen's mean, median,
# variance, standard deviation, and SEM
# unique_drugs = full_data["Drug Regimen"].unique()
# unique_drugs.sort()
# print(unique_drugs)
data_df = pd.DataFrame()
regimen_data = full_data.groupby("Drug Regimen")
data_df["Tumor Volume Mean"] = regimen_data["Tumor Volume (mm3)"].mean().round(decimals=2)
data_df["Tumor Volume Median"] = regimen_data["Tumor Volume (mm3)"].median().round(decimals=2)
data_df["Tumor Volume Variance"] = regimen_data["Tumor Volume (mm3)"].var().round(decimals=2)
data_df["Tumor Volume SD"] = regimen_data["Tumor Volume (mm3)"].std().round(decimals=2)
data_df["Tumor Volume SEM"] = regimen_data["Tumor Volume (mm3)"].sem().round(decimals=2)
data_df
```
## Bar plots
```
# Generate a bar plot showing number of data points for each treatment regimen using pandas
data_df["Count"] = regimen_data["Tumor Volume (mm3)"].count()
data_df.reset_index(inplace=True)
data_df
data_df.plot.bar(x="Drug Regimen", y="Count")
plt.show()
# # Generate a bar plot showing number of data points for each treatment regimen using pyplot
plt.bar(data_df["Drug Regimen"], data_df["Count"])
plt.xticks(rotation=90)
```
## Pie plots
```
# Generate a pie plot showing the distribution of female versus male mice using pandas
needfully_gendered = full_data.drop_duplicates("Mouse ID")
# !!! A small note: the rubric's notes on this section say, " Two bar plots are...""
gender_group = needfully_gendered.groupby("Sex")
gender_df = pd.DataFrame(gender_group["Sex"].count())
# print(gender_df)
gender_df.plot.pie(subplots=True)
# Generate a pie plot showing the distribution of female versus male mice using pyplot
plt.pie(gender_df, labels=["Female","Male"])
plt.title("Sex")
```
## Quartiles, outliers and boxplots
```
# Calculate the final tumor volume of each mouse in the four most promising treatment regimens.
# Calculate the IQR and quantitatively determine if there are any potential outliers.
promising=data_df.sort_values(by="Tumor Volume Mean")
promising_drugs = promising["Drug Regimen"][0:4]
full_data["Promising Drug"] = full_data["Drug Regimen"].isin(promising_drugs)
promising_df = full_data.loc[full_data["Promising Drug"],:].drop_duplicates("Mouse ID",keep="last").reset_index(drop=True)
promising_df.drop(columns=["Sex","Age_months","Weight (g)","Timepoint","Metastatic Sites","Promising Drug"],inplace=True)
promising_df.head()
print(promising_drugs)
quartiles = promising_df["Tumor Volume (mm3)"].quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
outliers_df = promising_df.loc[(promising_df["Tumor Volume (mm3)"] > upper_bound) | (promising_df["Tumor Volume (mm3)"] < lower_bound), :]
outliers_df
#no outliers present among Capomulin, Ramicane, Propriva, Ceftamin
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
capo_final = promising_df.loc[promising_df["Drug Regimen"] == "Capomulin"]
rami_final = promising_df.loc[promising_df["Drug Regimen"] == "Ramicane"]
prop_final = promising_df.loc[promising_df["Drug Regimen"] == "Propriva"]
ceft_final = promising_df.loc[promising_df["Drug Regimen"] == "Ceftamin"]
fig, ax = plt.subplots() #each variable contains a set of attribute/methods that are manipulateable or callable. "Fig" can change formatting, "Ax" is about the drawing
ax.boxplot([capo_final["Tumor Volume (mm3)"],rami_final["Tumor Volume (mm3)"],prop_final["Tumor Volume (mm3)"],ceft_final["Tumor Volume (mm3)"]])
ax.set_xticklabels(promising_drugs)
plt.title("Variance in Tumor Volume for Most Promising Regimens", x=.5, y=1)
plt.subplots_adjust(top = 0.99, bottom=0.01, hspace=.25)
```
## Line and scatter plots
```
# Generate a line plot of time point versus tumor volume for *a mouse* treated with Capomulin (s185)
s185 = full_data.loc[full_data["Mouse ID"] == "s185"]
s185
plt.plot(s185["Timepoint"], s185["Tumor Volume (mm3)"])
plt.xlabel("Timepoint")
plt.ylabel("Tumor Volume (mm3)")
plt.title("Capomulin Tumor Volume Over Time: Case Study (s185F)")
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
capomulin = full_data.loc[full_data["Drug Regimen"] == "Capomulin"]
capo_avgs = capomulin.groupby(capomulin["Mouse ID"]).mean()
avg_volume = capo_avgs["Tumor Volume (mm3)"].mean()
plt.figure(figsize=(10, 6))
plt.scatter(capo_avgs["Weight (g)"], capo_avgs["Tumor Volume (mm3)"])
plt.axhline(avg_volume, c="red", alpha=0.7)
plt.text(25.7,40.7,f"Average Tumor Volume ({round(avg_volume,2)})")
plt.xlabel("Weight (g)")
plt.ylabel("Tumor Volume (mm3)")
plt.title("Tumor Volume by Weight")
# again... not totally sure what output is desired here. "Versus"? Hopefully you think this is cute.
# Calculate the correlation coefficient and linear regression model for mouse weight
# and average tumor volume for the Capomulin regimen
weight = capomulin.groupby(capomulin["Mouse ID"])["Weight (g)"].mean()
volume = capomulin.groupby(capomulin["Mouse ID"])["Tumor Volume (mm3)"].mean()
slope, intercept, r, p, std_err = st.linregress(weight, volume)  # avoid shadowing the built-in int
fit = slope * weight + intercept
plt.scatter(weight,volume)
plt.xlabel("Mouse Weight")
plt.ylabel("Tumor Volume (mm3)")
plt.plot(weight,fit,"--")
plt.xticks(weight, rotation=90)
plt.show()
corr = round(st.pearsonr(weight,volume)[0],2)
print(f'The correlation between weight and tumor volume is {corr}')
```
```
import pandas as pd
from pandas import DataFrame, Series
import numpy as np
from IPython.display import display, HTML
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
%matplotlib inline
import nmrglue as ng
import glob
filenames = glob.glob("*DEPT*.fid")
experiment_list = []
for filename in filenames:
dic, FIDs = ng.varian.read(filename)
count = 0
for i in range(np.shape(FIDs)[0]):
# ignore blank data
thisFID = FIDs[i]
if np.sum(thisFID) == 0.0 or np.max(np.abs(thisFID)) < 0.1:
print "Ignored blank FID %d from %s." % (i+1, filename)
continue
count += 1
experiment_list.append(FIDs[i])
npoints = np.shape(experiment_list[0])[0]
print "Sequence: %s (%s=%s, %s=%s)" % (dic["procpar"]["seqfil"]["values"][0], dic["procpar"]["dn"]["values"][0],
dic["procpar"]["dm"]["values"][0], dic["procpar"]["dn2"]["values"][0],
dic["procpar"]["dm2"]["values"][0])
print "%d FIDs loaded from %s (%d complex points, nt=%sx%s, d1=%s s)." % (count, filename, npoints, len(dic["procpar"]["nt"]["values"]), dic["procpar"]["nt"]["values"][0], dic["procpar"]["d1"]["values"][0])
obs = float(dic["procpar"]["reffrq"]["values"][0]) # spectrometer frequency in MHz
sw = float(dic["procpar"]["sw"]["values"][0])
tof = float(dic["procpar"]["tof"]["values"][0])
carrier = obs*1.0E6 + tof # carrier frequency in Hz
udic = ng.varian.guess_udic(dic, FIDs)
udic[0]['size'] = int(dic["np"]) # number of R|I points in the spectrum
udic[0]['complex'] = True # True if complex data
udic[0]['encoding'] = 'direct' # keep as 'direct'
udic[0]['sw'] = sw # spectral width in Hz
udic[0]['obs'] = obs # Observation freq. in MHz.
udic[0]['car'] = carrier # carrier freq in Hz
udic[0]['label'] = 'C13' # the observed nucleus
udic[0]['time'] = True # whether this is time domain data
udic[0]['freq'] = False
udic["ndim"]=1
def process(dic, FID):
C = ng.convert.converter()
C.from_varian(dic, FID, udic)
pdic, pdata = C.to_pipe()
pdic, pdata = ng.pipe_proc.em(pdic, pdata, lb=0.25) # line broadening
pdic, pdata = ng.pipe_proc.zf(pdic, pdata, size=4*npoints, auto=True)
pdic, pdata = ng.pipe_proc.ft(pdic, pdata)
pdic, pdata = ng.pipe_proc.ps(pdic, pdata, p0=145, p1=-150)
return pdic, pdata
raw_spectrum_list = [ process(dic,FID) for FID in experiment_list ]
raw_spectra = [ spectrum for dic, spectrum in raw_spectrum_list ]
pdic, pdata = raw_spectrum_list[1]
uc = ng.pipe.make_uc(pdic, pdata)
ppm = uc.ppm_scale()
ppm = ppm - ppm[-1] - 10.0
spectrum_number = 0
spectrum = np.real(raw_spectra[spectrum_number])
plt.figure(figsize=(18,4))
plt.xlim(30,0)
plt.ylim(-2500,2500)
plt.plot(ppm, spectrum, "k")
plt.xlabel("13C chemical shift (ppm)")
ax = plt.gca()
ax.tick_params(top="off")
ax.get_xaxis().set_tick_params(length=5,direction='out', width=1)
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
plt.show()
interval = ppm[1]-ppm[0]
def find_index(this_ppm):
return int(np.ceil((this_ppm-ppm[0])/interval))
def cphase(angle):
return np.exp(1j*np.radians(angle))
peaks = [(125.12,125.00), (125.12,125.00), (90.485,90.365), (49.05, 48.93), (26.18,26.06), (22.625,22.505), (6.855,6.715)]
phases = [1.0, 1.0, 0.0, -1.0, -1.0, -1.0, -3.0]
cphases = [ cphase(i) for i in phases ]
spectrum_number = 0
peak_number = 6
spectrum = raw_spectra[spectrum_number]
plt.figure(figsize=(18,4))
peak = peaks[peak_number]
phase = cphases[peak_number]
plt.plot(ppm, np.real(spectrum*phase), "k")
center = np.average(peak)
x_axis_range = 0.5
start = center+x_axis_range
end = center-x_axis_range
plt.xlim(start,end)
plt.ylim(-5000,20000)
peak_start = find_index(peak[0])
peak_end = find_index(peak[1])
peak_x = ppm[peak_start:peak_end]
peak_y = np.real(spectrum[peak_start:peak_end]*phase)
plt.plot(peak_x,peak_y,"bx")
plt.xlabel("13C chemical shift (ppm)")
plt.xticks(np.linspace(end,start,41))
#plt.yticks([])
ax = plt.gca()
ax.tick_params(top="off")
ax.get_xaxis().set_tick_params(length=5,direction='out', width=1)
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
plt.show()
def compute_baseline(spectrum, order=2, clip_below = -1000, clip_above = 1000, plot=False):
noise_x = ppm.copy()
noise_y = np.real(spectrum.copy())
mask = np.ones(len(ppm), dtype=bool)
for peak in peaks:
index_low = find_index(peak[0])
index_high = find_index(peak[1])
mask[index_low:index_high] = False
noise_x = noise_x[mask]
noise_y = noise_y[mask]
noise_y = np.clip(noise_y, clip_below, clip_above)
poly_coeff = np.polyfit(noise_x,noise_y,order)
baseline_func = np.poly1d(poly_coeff)
baseline = baseline_func(ppm)
RMSE = np.sqrt(np.mean(np.square(noise_y-baseline_func(noise_x))))
if plot:
plt.figure(figsize=(18,4))
plt.plot(ppm, np.real(spectrum), "k")
plt.plot(ppm, baseline, "r")
y_minus_limit = 1.67*clip_below if clip_below < 0.0 else 0.6*clip_below
y_plus_limit = 1.67*clip_above if clip_above > 0.0 else 0.6*clip_above
plt.ylim(y_minus_limit,y_plus_limit)
plt.xlabel("19F chemical shift (ppm)")
ax = plt.gca()
ax.tick_params(top="off")
ax.get_xaxis().set_tick_params(length=5,direction='out', width=1)
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
plt.show()
else:
print "%.1E " % (RMSE / 1E5),
for i in poly_coeff:
print "%6.2f" % i,
print
return baseline
baselines = [ compute_baseline(spectrum) for spectrum in raw_spectra ]
subtracted_spectra = [ spectrum - baseline for spectrum, baseline in zip(raw_spectra,baselines) ]
def compute_signal_to_noise(spectrum, noise=(70.0, 60.0), plot=False):
xy = np.array([ppm,spectrum])
min_value2 = find_index(noise[0])
max_value2 = find_index(noise[1])
y_noise = np.real(xy[1,min_value2:max_value2])
zero_level = np.mean(y_noise)
signal_to_noise = []
for i,peak in enumerate(peaks):
min_value1 = find_index(peak[0])
max_value1 = find_index(peak[1])
y_signal = np.real(xy[1,min_value1:max_value1]*cphases[i])
signal_level = np.max(y_signal - zero_level)/2.0
noise_level = np.sqrt(np.mean(np.square(y_noise-zero_level)))
signal_to_noise.append(signal_level / noise_level)
if plot:
print "%.2E %.2E" % (signal_level, noise_level)
print zero_level
plt.plot(x_signal,y_signal,"r")
plt.plot(x_noise,y_noise,"b")
#plt.ylim(-5E4,5E4)
plt.show()
return signal_to_noise
signal_to_noise_list = [ compute_signal_to_noise(spectrum) for spectrum in subtracted_spectra ]
signal_to_noise = DataFrame(signal_to_noise_list)
experiment_numbers = [ i+1 for i in range(len(raw_spectra)) ]
signal_to_noise["run"] = experiment_numbers
signal_to_noise.set_index("run",drop=True,inplace=True)
peak_numbers = range(len(peaks))
peak_numbers = ["peak %d" % (i+1) for i in peak_numbers]
signal_to_noise.columns = peak_numbers
#display(signal_to_noise)
avg_signal_to_noise = signal_to_noise.mean()
avg_signal_to_noise = avg_signal_to_noise.apply(lambda x : "%.0f" % x)
avg_signal_to_noise.name = "S/N"
display(avg_signal_to_noise)
def integrate(spectrum, i, peak):
index_low = find_index(peak[0])
index_high = find_index(peak[1])
return np.sum(np.real(spectrum[index_low:index_high]*cphases[i]))
results=[]
for spectrum in subtracted_spectra:
integrals = []
for i,peak in enumerate(peaks):
integrals.append(integrate(spectrum,i,peak))
integrals = np.array(integrals)
integrals = integrals/1E5
#integrals = 10.00 * integrals / integrals[8]
results.append(integrals)
integrations = DataFrame(results)
integrations.columns = peak_numbers
integrations["run"] = experiment_numbers
integrations.set_index("run",drop=True,inplace=True)
display(integrations)
print "n = %d" % len(integrations)
mean = integrations.mean().apply(lambda x : "%.4f" % x)
stdev = integrations.std().apply(lambda x : "%.4f" % x)
cov = (100.0*integrations.std()/integrations.mean()).apply(lambda x : "%.2f%%" % x)
stderr = (integrations.std() / np.sqrt(len(integrations))).apply(lambda x : "%.4f" % x)
stderr_cov = (100*integrations.std()/(integrations.mean()*np.sqrt(len(integrations)))).apply(lambda x : "%.2f%%" % x)
headings = ["avg", "stdev", "cov", "stderr", "stderr_cov", "S/N"]
summary_df = DataFrame([mean, stdev, cov, stderr, stderr_cov, avg_signal_to_noise], index=headings)
display(summary_df)
```
# Exercise 2: Scattering from a pressure-release sphere
## The problem
In the [first](../tutorials/1_sphere_scatterer_null_field.ipynb) and [second](../tutorials/2_sphere_scatterer_direct.ipynb) tutorials, we looked at two formulations for a rigid scattering problem.
In this exercise, you will write your own code to solve a pressure-release scattering problem. As in the tutorials, we will use a unit sphere for $\Omega$, and we define the incident wave by
$$
p_{\text{inc}}(\mathbf x) = \mathrm{e}^{\mathrm{i} k x_0}.
$$
where $\mathbf x = (x_0, x_1, x_2)$.
Acoustic waves are governed by the Helmholtz equation:
$$
\Delta p_\text{total} + k^2 p_\text{total} = 0, \quad \text{ in } \mathbb{R}^3 \backslash \Omega,
$$
where $p_\text{total}$ is the total pressure. We can split $p_\text{total}$ into incident and scattered pressures by writing $p_\text{total}=p_\text{s}+p_\text{inc}$. The scattered pressure ($p_\text{s}$) satisfies the Sommerfeld radiation condition
$$
\frac{\partial p_\text{s}}{\partial r}-\mathrm{i}kp_\text{s}=o(r^{-1})
$$
when $r:=|\mathbf{x}|\rightarrow\infty$.
For our problem, we impose a Dirichlet boundary condition:
$$
p_\text{total}=0, \quad \text{ on } \Gamma,
$$
where $\Gamma$ is the surface of the sphere $\Omega$.
## The formulation
We use a formulation based on the [direct formulation in the second tutorial](../tutorials/2_sphere_scatterer_direct.ipynb).
### Representation formula
For this problem, we use the following representation formula:
$$
p_\text{s} = \mathcal{D}u -\mathcal{S}\lambda,
$$
where $\mathcal{S}$ is the single layer potential operator; $\mathcal{D}$ is the double layer potential operator; $u$ is the value (or trace) of $p_\text{s}$ on the surface $\Gamma$; and $\lambda$ is the normal derivative of $p_\text{s}$ on the surface $\Gamma$.
For this problem, our boundary condition tells us that $u=-p_\text{inc}$ on $\Gamma$.
### Boundary integral equation
For this problem, we want to solve the following boundary integral equation:
$$
\mathsf{S}\lambda = -(\mathsf{D} - \tfrac{1}{2}\mathsf{I})p_\text{inc},
$$
where $\mathsf{S}$ is the single layer boundary operator; $\mathsf{D}$ is the double layer boundary operator, and $\mathsf{I}$ is the identity operator.
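To see where this comes from, take the trace of the representation formula on $\Gamma$ from the exterior and use the jump relations for the layer potentials, $\gamma^+\mathcal{D}u = (\mathsf{D}+\tfrac{1}{2}\mathsf{I})u$ and $\gamma^+\mathcal{S}\lambda = \mathsf{S}\lambda$ (the signs here assume the exterior-trace convention consistent with the equation above; other references may use the opposite sign):
$$
u = (\mathsf{D}+\tfrac{1}{2}\mathsf{I})u - \mathsf{S}\lambda
\quad\Longrightarrow\quad
\mathsf{S}\lambda = (\mathsf{D}-\tfrac{1}{2}\mathsf{I})u = -(\mathsf{D}-\tfrac{1}{2}\mathsf{I})p_\text{inc},
$$
where the last step uses the boundary condition $u=-p_\text{inc}$ on $\Gamma$.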
### Solving with Bempp
Your task is to adapt and combine the two example codes in the [first](../tutorials/1_sphere_scatterer_null_field.ipynb) and [second](../tutorials/2_sphere_scatterer_direct.ipynb) tutorials to solve this problem and plot a slice of the solution at $z=0$.
To get you started, I've copied the first few lines (which were the same in both examples) into the cell below.
```
%matplotlib inline
import bempp.api
from bempp.api.operators.boundary import helmholtz, sparse
from bempp.api.operators.potential import helmholtz as helmholtz_potential
from bempp.api.linalg import gmres
import numpy as np
from matplotlib import pyplot as plt
k = 8.
grid = bempp.api.shapes.regular_sphere(3)
space = bempp.api.function_space(grid, "DP", 0)
```
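If you want to check your approach, here is one possible sketch. It is hedged: it follows the same Bempp calls used in the two tutorials, the GMRES tolerance is an arbitrary choice, and the evaluation grid and plotting are left to you.
```
# boundary operators for  S λ = -(D - I/2) p_inc
identity = sparse.identity(space, space, space)
slp = helmholtz.single_layer(space, space, space, k)
dlp = helmholtz.double_layer(space, space, space, k)

# trace of the incident wave on the boundary
@bempp.api.complex_callable
def p_inc_callable(x, n, domain_index, result):
    result[0] = np.exp(1j * k * x[0])

p_inc = bempp.api.GridFunction(space, fun=p_inc_callable)

# solve the boundary integral equation for λ (the normal derivative of p_s)
rhs = (0.5 * identity - dlp) * p_inc
lam, info = gmres(slp, rhs, tol=1e-5)

# p_s = D u - S λ with u = -p_inc; evaluate it with the potential operators
# from `helmholtz_potential` on a grid of points in the z=0 plane and plot.
```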
## What next?
After attempting this exercise, you should read [tutorial 3](../tutorials/3_convergence.ipynb).
<center>
<img src="../../img/ods_stickers.jpg">
## Open Machine Learning Course
</center>
Author: Yury Kashnitsky, research programmer at Mail.ru Group and senior lecturer at the Faculty of Computer Science, HSE. This material is distributed under the [Creative Commons CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) license. It may be used for any purpose (edited, corrected, or built upon) except commercial ones, with mandatory attribution of the author.
# <center>Topic 4. Linear models for classification and regression
## <center>Part 3. A visual example of regularization in logistic regression
The first article already gave an example of how polynomial features allow linear models to build non-linear separating surfaces. Let's illustrate this with pictures.
We will look at how regularization affects classification quality on the microchip testing dataset from Andrew Ng's machine learning course.
We will use logistic regression with polynomial features and vary the regularization parameter C.
First we will see how regularization affects the classifier's decision boundary and develop an intuition for overfitting and underfitting.
Then we will numerically find a near-optimal regularization parameter using cross-validation (`cross-validation`) and grid search (`GridSearch`).
```
from __future__ import division, print_function
# turn off assorted Anaconda warnings
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.model_selection import GridSearchCV
```
We load the data with the `read_csv` method of the `pandas` library. This dataset contains, for 118 microchips (the objects), the results of two quality-control tests (two numeric features) and whether the chip was released to production. The features are already centered, i.e. the column means have been subtracted from all values, so the "average" microchip corresponds to zero test results.
```
# load the data
data = pd.read_csv('../../data/microchip_tests.txt',
header=None, names = ('test1','test2','released'))
# basic information about the dataset
data.info()
```
Let's look at the first and last 5 rows.
```
data.head(5)
data.tail(5)
```
Let's store the training features and the target class labels in separate NumPy arrays.
```
X = data.iloc[:, :2].values
y = data.iloc[:, 2].values
```
Let's plot the data. Red corresponds to faulty chips, green to normal ones.
```
plt.scatter(X[y == 1, 0], X[y == 1, 1], c='green', label='Released')
plt.scatter(X[y == 0, 0], X[y == 0, 1], c='red', label='Faulty')
plt.xlabel("Test 1")
plt.ylabel("Test 2")
plt.title('2 microchip tests')
plt.legend();
```
We define a function to plot the classifier's decision boundary.
```
def plot_boundary(clf, X, y, grid_step=.01, poly_featurizer=None):
x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
xx, yy = np.meshgrid(np.arange(x_min, x_max, grid_step),
np.arange(y_min, y_max, grid_step))
    # assign a colour to each point
    # in the mesh [x_min, x_max] x [y_min, y_max]
Z = clf.predict(poly_featurizer.transform(np.c_[xx.ravel(), yy.ravel()]))
Z = Z.reshape(xx.shape)
plt.contour(xx, yy, Z, cmap=plt.cm.Paired)
```
Polynomial features up to degree $d$ for two variables $x_1$ and $x_2$ are the following:
$$\large \{x_1^d, x_1^{d-1}x_2, \ldots x_2^d\} = \{x_1^ix_2^j\}_{i+j=d, i,j \in \mathbb{N}}$$
For example, for $d=3$ these are the features:
$$\large 1, x_1, x_2, x_1^2, x_1x_2, x_2^2, x_1^3, x_1^2x_2, x_1x_2^2, x_2^3$$
By sketching Pascal's triangle you can work out how many such features there are for $d=4,5,\ldots$ and for any $d$ in general.
Simply put, the number of such features grows very quickly, and building, say, degree-10 polynomial features for 100 original features can turn out to be costly (and, what's more, unnecessary).
Let's create an `sklearn` object that will add polynomial features up to degree 7 to the matrix $X$.
```
poly = PolynomialFeatures(degree=7)
X_poly = poly.fit_transform(X)
X_poly.shape
```
Let's train a logistic regression with regularization parameter $C = 10^{-2}$ and plot the decision boundary.
We will also check the accuracy of the classifier on the training set. We see that the regularization turned out to be
too strong, and the model has "underfitted".
```
C = 1e-2
logit = LogisticRegression(C=C, n_jobs=-1, random_state=17)
logit.fit(X_poly, y)
plot_boundary(logit, X, y, grid_step=.01, poly_featurizer=poly)
plt.scatter(X[y == 1, 0], X[y == 1, 1], c='green', label='Released')
plt.scatter(X[y == 0, 0], X[y == 0, 1], c='red', label='Faulty')
plt.xlabel("Test 1")
plt.ylabel("Test 2")
plt.title('2 microchip tests. Logit with C=0.01')
plt.legend();
print("Accuracy of the classifier on the training set:",
round(logit.score(X_poly, y), 3))
```
Let's increase $C$ to 1. By doing so we *weaken* the regularization; now the weights in the logistic regression solution may turn out to be larger (in absolute value) than in the previous case.
```
C = 1
logit = LogisticRegression(C=C, n_jobs=-1, random_state=17)
logit.fit(X_poly, y)
plot_boundary(logit, X, y, grid_step=.005, poly_featurizer=poly)
plt.scatter(X[y == 1, 0], X[y == 1, 1], c='green', label='Released')
plt.scatter(X[y == 0, 0], X[y == 0, 1], c='red', label='Faulty')
plt.xlabel("Test 1")
plt.ylabel("Test 2")
plt.title('2 microchip tests. Logit with C=1')
plt.legend();
print("Accuracy of the classifier on the training set:",
round(logit.score(X_poly, y), 3))
```
Let's increase $C$ further, to 10,000. Now there is clearly not enough regularization, and we observe overfitting. Note that in the previous case (with $C$=1 and a "smooth" boundary) the model's accuracy on the training set is not much lower than in the third case, yet on new data, one can imagine, the second model will perform much better.
```
C = 1e4
logit = LogisticRegression(C=C, n_jobs=-1, random_state=17)
logit.fit(X_poly, y)
plot_boundary(logit, X, y, grid_step=.005, poly_featurizer=poly)
plt.scatter(X[y == 1, 0], X[y == 1, 1], c='green', label='Released')
plt.scatter(X[y == 0, 0], X[y == 0, 1], c='red', label='Faulty')
plt.xlabel("Test 1")
plt.ylabel("Test 2")
plt.title('2 microchip tests. Logit with C=10k')
plt.legend();
print("Accuracy of the classifier on the training set:",
round(logit.score(X_poly, y), 3))
```
To discuss the results, let's rewrite the functional optimized in logistic regression in the following form:
$$J(X,y,w) = \mathcal{L} + \frac{1}{C}||w||^2,$$
where
- $\mathcal{L}$ is the logistic loss summed over the whole dataset
- $C$ is the inverse regularization coefficient (the very same $C$ from the `sklearn` implementation of `LogisticRegression`)
**Intermediate conclusions**:
- the larger the parameter $C$, the more complex the dependencies in the data the model can recover (intuitively, $C$ corresponds to the "complexity", or capacity, of the model)
- if the regularization is too strong (small values of $C$), the solution of the loss-minimization problem may be one in which many weights are zeroed out or become too small. One also says that the model is not "penalized" enough for its errors (i.e. in the functional $J$ the sum of squared weights "outweighs" the loss $\mathcal{L}$, which may stay relatively large). In that case the model ends up *underfitted* (case 1)
- conversely, if the regularization is too weak (large values of $C$), the solution of the optimization problem may be a vector $w$ with components that are large in absolute value. In that case $\mathcal{L}$ contributes more to the optimized functional $J$ and, loosely speaking, the model is too "afraid" to make mistakes on the training objects, so it ends up *overfitted* (case 3); a quick check of the weight magnitudes is sketched right after this list
- logistic regression will not "figure out" (or, as people say, "learn") which value of $C$ to choose on its own; that is, it cannot be determined by solving the optimization problem that logistic regression is (unlike the weights $w$). In exactly the same way, a decision tree cannot "figure out" by itself which depth limit to pick within a single training run. Therefore $C$ is a *hyperparameter* of the model, tuned via cross-validation, just like *max_depth* for a tree.
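A quick sanity check of the last two points (a small addition, not in the original notebook): the largest absolute weight of the trained model grows as $C$ increases.
```
# compare the largest |w_i| for strong, moderate and weak regularization
for C_value in (1e-2, 1, 1e4):
    lr = LogisticRegression(C=C_value, random_state=17).fit(X_poly, y)
    print(C_value, np.abs(lr.coef_).max())
```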
**Tuning the regularization parameter**
Now let's find an optimal (for this example) value of the regularization parameter $C$. This can be done with `LogisticRegressionCV`, a grid search over parameter values with built-in cross-validation. This class is designed specifically for logistic regression (efficient algorithms for searching over its parameters are known); for an arbitrary model we would use `GridSearchCV`, `RandomizedSearchCV` or, for example, the dedicated hyperparameter-optimization algorithms implemented in `hyperopt`.
```
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=17)
c_values = np.logspace(-2, 3, 500)
logit_searcher = LogisticRegressionCV(Cs=c_values, cv=skf, verbose=1, n_jobs=-1)
logit_searcher.fit(X_poly, y)
logit_searcher.C_
```
Let's see how the model's quality (the share of correct answers on the training and validation sets) changes as the hyperparameter $C$ varies.
```
plt.plot(c_values, np.mean(logit_searcher.scores_[1], axis=0))
plt.xlabel('C')
plt.ylabel('Mean CV-accuracy');
```
Let's zoom in on the region of the "best" values of C.
```
plt.plot(c_values, np.mean(logit_searcher.scores_[1], axis=0))
plt.xlabel('C')
plt.ylabel('Mean CV-accuracy');
plt.xlim((0,10));
```
Such curves are called *validation curves*; `sklearn` has dedicated utilities for building them, and a possible sketch is given below.
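For an arbitrary estimator the same kind of curve can be built with `validation_curve`. This is a sketch that is not part of the original notebook; it reuses `X_poly`, `y`, `c_values` and `skf` defined above.
```
from sklearn.model_selection import validation_curve

# accuracy on the training folds and on the held-out folds for each value of C
train_scores, valid_scores = validation_curve(
    LogisticRegression(random_state=17), X_poly, y,
    param_name='C', param_range=c_values, cv=skf, scoring='accuracy')

plt.plot(c_values, train_scores.mean(axis=1), label='train')
plt.plot(c_values, valid_scores.mean(axis=1), label='CV')
plt.xlabel('C'); plt.ylabel('accuracy'); plt.legend();
```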
# DataLoader in PyTorch
__DataLoader__ is the PyTorch class that lets you iterate over a dataset; it orchestrates the whole process of working with the data.
```
DataLoader(
dataset,
batch_size=1,
shuffle=False,
sampler=None,
batch_sampler=None,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
timeout=0,
worker_init_fn=None,
prefetch_factor=2,
persistent_workers=False
)
```
- __dataset__ -- lets you plug in a custom dataset class for working with your data, where you can define the logic of how individual samples for a batch are produced.
- __sampler__ -- defines the order of the dataset elements that go into a batch, i.e. the list of indices that will be grouped into batches. It is convenient to override this, for example, for distributed training.
- __collate_fn__ -- lets you apply a final post-processing step to a batch of data. For example, if sequences of different lengths end up in a batch, then once the batch has been assembled the shorter sequences can be padded with zeros up to the length of the longest one; a sketch of such a function is given right after this list.
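As an illustration of the last point, here is a possible padding `collate_fn`. It is a sketch that is not part of the original code and assumes each dataset item is a `(sequence_tensor, integer_label)` pair.
```
import torch
from torch.nn.utils.rnn import pad_sequence

def pad_collate(batch):
    # batch is a list of (sequence, label) pairs with varying sequence lengths
    seqs, labels = zip(*batch)
    lengths = torch.tensor([len(s) for s in seqs])
    # pad every sequence with zeros up to the longest one in this batch
    padded = pad_sequence(list(seqs), batch_first=True, padding_value=0)
    return padded, lengths, torch.tensor(labels)

# usage: DataLoader(dataset, batch_size=32, collate_fn=pad_collate)
```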
## Custom Dataset
```
import pandas as pd
import pickle
import numpy as np
from tqdm import tqdm_notebook
from torch.utils.data import DataLoader, Dataset, Sampler
from torch.utils.data.dataloader import default_collate
BATCH_SIZE = 128
EPOCHS = 100
class CustomDataset(Dataset):
    # constructor: load the dataset from disk
def __init__(self, dataset_path):
with open(dataset_path, 'rb') as f:
self.X, self.target = pickle.load(f)
return
    # override the method that computes the dataset size
def __len__(self):
return len(self.X)
    # override the method that fetches
    # an observation from the dataset by index
def __getitem__(self, idx):
return self.X[idx], self.target[idx]
```
## Custom Sampler
```
class CustomSampler(Sampler):
    # constructor: initialize the element indices
def __init__(self, data):
self.data_indices = np.arange(len(data))
shuffled_indices = np.random.permutation(len(self.data_indices))
self.data_indices = np.ascontiguousarray(self.data_indices)[shuffled_indices]
return
def __len__(self):
return len(self.data_indices)
    # return an iterator that yields
    # indices of the shuffled dataset
def __iter__(self):
return iter(self.data_indices)
```
## Custom collate_fn
```
def collate(batch):
return default_collate(batch)
def create_data_loader(train_dataset, train_sampler,
test_dataset, test_sampler):
train_loader = DataLoader(dataset=train_dataset, sampler=train_sampler,
batch_size=BATCH_SIZE, collate_fn=collate,
shuffle=False)
test_loader = DataLoader(dataset=test_dataset, sampler=test_sampler,
batch_size=BATCH_SIZE, collate_fn=collate,
shuffle=False)
return train_loader, test_loader
!git clone https://github.com/RiskModellingResearch/DeepLearning_Winter22.git
# create the custom Dataset and Sampler objects
train_ds = CustomDataset('DeepLearning_Winter22/week_03/data/X_train_cat.pickle')
train_sampler = CustomSampler(train_ds.X)
test_ds = CustomDataset('DeepLearning_Winter22/week_03/data/X_test_cat.pickle')
test_sampler = CustomSampler(test_ds.X)
train_loader, test_loader = create_data_loader(train_ds, train_sampler,
test_ds, test_sampler)
def run_train():
for epoch in tqdm_notebook(range(EPOCHS)):
for features, labels in train_loader:
pass
return
run_train()
```
<a href="https://colab.research.google.com/github/unpackAI/DL101/blob/main/01_CV_Workbook.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# 💻Week 1 Workbook of unpackAI "DL101 Bootcamp"
## 📕 Learning Objectives of the Week
* Understand the fundamental process of building your own image classification project.
## 🔗 Install & Import Required Code Packages
```
!pip install -Uqq fastbook
!pip install -Uqq unpackai
from unpackai.utils import clean_error_img
from fastbook import *
from fastai.vision.widgets import *
```
## unpackAI Assignment Section
* **Assignment 1**: Go through the multiple choice questions below and choose the correct answer. Discuss during the presentation session.
* **Assignment 2**: Build an entire single label classification model starting from defining your objective, gathering data to training your model and interpreting the results.
## Assigment 1: Go through the multiple choice questions below and choose the correct answer. Discuss during the presentation session.
1. What is overfitting?
> A: Overfitting is a scenario in data science where a data model is unable to capture the relationship between the input and output variables accurately, generating a high loss on both the training set and unseen data.
> B: Overfitting happens when a model learns and memorizes the detail and noise in the training data to the extent that it negatively impacts the performance of the model on new data. This results in a further decreasing loss on the training set but an increasing loss on the validation set.
> C: Overfitting is when the model is performing very well on the training, validation and test set and achieves a very low loss in all 3 data sets. This means that your model is ready to be used in practice.
2. What is a training, validation and test set?
> A: The *training set* is the sample of data used to start fitting and training the model. The *test set* is the sample of data used to evaluate the trained models performance on new data while continuing to improve and tune the model's parameters. The *validation set* is the sample of data used to evaluate the final model without continuing to tune its parameters.
> B: The *test set* is the sample of data used to start fitting and training the model. The *training set* is the sample of data used to evaluate the trained models performance on new data while continuing to improve and tune the model's parameters. The *validation set* is the sample of data used to evaluate the final model without continuing to tune its parameters.
> C: The *training set* is the sample of data used to start fitting and training the model. The *validation set* is the sample of data used to evaluate the trained models performance on new data while continuing to improve and tune the model's parameters. The *test set* is the sample of data used to evaluate the final model without continuing to tune its parameters.
3. Observe the `Learner` that you have encountered in the Coursebook. What are the key **metrics** to represent the model's performance? Name and explain them.
> A: The key metrics are *epochs* and *time*. Both give us key insights how much memory the model requires and its performance.
> B: The key metrics are *train_loss*, *valid_loss* and *error rate / accuray*. Each metric provides insights on the model's performance on the training set, validation set and test set.
> C: The single most important metric is the *loss*. The loss is what tells the machine and human how the model performs.
4. What is the difference between Classification and Regression in Machine Learning?
> A: A *classification model* is one that attempts to predict a class, or category. That is, it's predicting from a number of discrete possibilities, such as "dog" or "cat". A *regression model* is one that attempts to predict one or more numeric quantities, such as a temperature or a location.
> B: A *regression model* is one that attempts to predict a class, or category. That is, it's predicting from a number of discrete possibilities, such as "dog" or "cat". A *classification model* is one that attempts to predict one or more numeric quantities, such as a temperature or a location.
> C: A *classification model* is one that attempts to utilize sorting algorithms that can allow it to learn time-dependent patterns across multiples models different from images and speech. A *regression model* is one that attempts to to predict the "rating" or "preference" a user would give to an item.
5. What is transfer learning in Machine Learning?
> A: Transfer learning is a machine learning technique where the parameters of a model are not updated through model training but simply replaced by the parameters of another pre-trained model.
> B: Transfer learning is a technique where a machine learning engineer uses his expertise and techniques in another industry to a problem in an industry he does not have much experience in.
> C: A: Transfer learning is a machine learning technique where a pre-trained model is re-purposed on a second related task. In practice this means that, we use an existing already-trained model and its trained "intelligence" to utilize it for our own use case.
## Assignment 2: Build an entire single label classification model starting from defining your objective, gathering data to training your model and interpreting the results.
### Step One - Define a ML problem and propose a solution.
#### 1. Define the objective (what goal, metrics to measure success)
**Your objective:**
*Example objective: The goal is to build a model that is able to differentiate between grizzly bears, black bears and teddy bears with a minimum accuracy of 90%.*
#### 2. Describe your dataset (what data, how many classes etc.)
**Your dataset:**
*Example dataset: The dataset consists of a total of 450 images seperated in 3 different classes that each have a set of 150 images. The three classes are: "black", "grizzly" and "teddy".*
#### 3. Describe your model (what is the model supposed to do)
**Your model:**
*Example model: A 3-class, single-label classification model that correctly classifies an image it has never seen before into one of the three classes named above.*
### Step Two - Collect and construct your dataset
In order to collect and design your own dataset we provide you with the scraping tool below.
**DuckDuckGo Image Scraper** - Scrape and Collect images via a search engine from the web.
In order to collect your images we will be utilizing **DuckDuckGo**. DuckDuckGo is an internet search engine that emphasizes protecting searchers' privacy and avoiding the filter bubble of personalized search results.
Below is a slightly modified version of the notebook by [Joe Dockrill](https://joedockrill.github.io/jmd_imagescraper/). Many thanks to him for the notebook and the package he wrote.
> Note: When using a Web Search Engine to download your pictures, there's no way to be sure exactly what images a search like this will find. The results can change over time. We've heard of at least one case of a community member who found some unpleasant pictures in their search results. You'll receive whatever images are found by the web search engine. If you're running this at work, or with kids, etc, then be cautious before you display the downloaded images.
##### Install & import `DuckDuckGoImageScraper` specific packages
```
!pip install -q jmd_imagescraper
from jmd_imagescraper.imagecleaner import *
from pathlib import Path
from jmd_imagescraper.core import *
```
##### Download images
Below you can see an example, of what changes would be required to work with a teddy, grizzly and black bear dataset.
```
IMAGE_DIR = Path("/content/gdrive/MyDrive/images") # Comment: You can store in a different Google Drive Folder
number_images_to_download = 50
duckduckgo_search(IMAGE_DIR, "teddy", "teddy bear", max_results=number_images_to_download)
duckduckgo_search(IMAGE_DIR, "grizzly", "grizzly bear", max_results=number_images_to_download)
duckduckgo_search(IMAGE_DIR, "black", "black bear", max_results=number_images_to_download)
```
Now modify the code below for your own use case. You will have to:
1. `path`: Define the `path` which will be the folder in which you will temporarily save the images. Once you disconnect, the images will be deleted.
1. `number_images_to_download`: The number of images you download per defined class. This can go up to 477 at the time of writing.
1. `duckduckgo_search`: You will have to define your **classes** (for example: teddy, grizzly, black) and your **search term** (for example: teddy bear, grizzly bear, black bear).
> Note: When downloading the images please make sure to check your search terms ahead of running the script below. Go and search for yourself to define the best search terms.
```
path = Path().cwd()/"images"
number_images_to_download = 50
duckduckgo_search(path, "apple", "apple in the basket", max_results=number_images_to_download)
duckduckgo_search(path, "orange", "orange", max_results=number_images_to_download)
duckduckgo_search(path, "pineapple", "pineapple", max_results=number_images_to_download)
```
##### Displaying the image cleaner
Use this to get rid of unsuitable images without leaving your notebook.
```
display_image_cleaner(path)
```
Sometimes, especially in real-life projects, corrupted images are a frequent occurrence; you can fix the problem by deleting all of the faulty images.
**We leave it up to you to finish your own model using your own dataset!**
```
clean_error_img(path)
```
#### Your own data
If you want to use your own personal data, you can upload your images using the file system on the right (see the image below).
> Hint: Make sure to define your path so that it points to your main folder that contains your labeled folders with the images. A great way to do that is to use the *folder* icon on the left, to find the right folder, right click and *copy path*.
```
path = Path("insert path here")
```
**However, considering your own time, we recommend you to use the DuckDuckGo Image Scraper above.**
### Step Three - Data Transformation: Create your **DataLoaders** and utilize **Data Augmentation Methods** to improve your dataset.
```
```
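If you get stuck, here is one possible sketch using the fastai `DataBlock` API from the coursebook. It is only one way to do it: it assumes `path` points at the folder of class-labeled image subfolders created above, and the resize and augmentation settings are arbitrary choices you can tune.
```
data_block = DataBlock(
    blocks=(ImageBlock, CategoryBlock),               # images in, one category label out
    get_items=get_image_files,                        # collect all image files under `path`
    splitter=RandomSplitter(valid_pct=0.2, seed=42),  # random 20% validation split
    get_y=parent_label,                               # label = name of the parent folder
    item_tfms=Resize(224),                            # resize each item before batching
    batch_tfms=aug_transforms())                      # standard data augmentation
dls = data_block.dataloaders(path)
dls.show_batch(max_n=9)
```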
### Step Four - Train your model.
```
```
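A minimal training sketch, assuming the `dls` object from the previous step and transfer learning with a pretrained backbone; the ResNet18 architecture and the number of epochs are placeholder choices.
```
learn = cnn_learner(dls, resnet18, metrics=accuracy)  # pretrained ResNet18 + new head
learn.fine_tune(4)                                    # a few epochs of fine-tuning
```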
### Step Five - Interpret the model and make predictions: Create a notebook app to upload and classify external images.
> Hint: Utilize the *top losses* and *confusion matrix* methods seen in the coursebook to interpret your model.
```
```
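A possible sketch for interpretation and prediction, assuming the trained `learn` object from the previous step; the image path in the comment is a placeholder.
```
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()    # which classes get confused with which?
interp.plot_top_losses(9)         # the images the model is most wrong or least sure about

# to classify a new image uploaded to the notebook:
# img = PILImage.create('path/to/your/image.jpg')
# pred_class, pred_idx, probs = learn.predict(img)
```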
```
import os
# Find the latest version of spark 3.0 from http://www.apache.org/dist/spark/ and enter as the spark version
# For example:
# spark_version = 'spark-3.0.1'
spark_version = 'spark-3.0.3'
os.environ['SPARK_VERSION']=spark_version
# Install Spark and Java
!apt-get update
!apt-get install openjdk-11-jdk-headless -qq > /dev/null
!wget -q http://www.apache.org/dist/spark/$SPARK_VERSION/$SPARK_VERSION-bin-hadoop2.7.tgz
!tar xf $SPARK_VERSION-bin-hadoop2.7.tgz
!pip install -q findspark
# Set Environment Variables
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-11-openjdk-amd64"
os.environ["SPARK_HOME"] = f"/content/{spark_version}-bin-hadoop2.7"
# Start a SparkSession
import findspark
findspark.init()
!wget https://jdbc.postgresql.org/download/postgresql-42.2.9.jar
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("BigData-HW-Part-1").config("spark.driver.extraClassPath", "/content/postgresql-42.2.9.jar").getOrCreate()
#Load Amazon data into Spark DataFrame
from pyspark import SparkFiles
url = 'https://s3.amazonaws.com/amazon-reviews-pds/tsv/amazon_reviews_us_Video_Games_v1_00.tsv.gz'
spark.sparkContext.addFile(url)
video_game_df = spark.read.csv(SparkFiles.get("amazon_reviews_us_Video_Games_v1_00.tsv.gz"), sep='\t', header=True, inferSchema=True)
video_game_df.show()
#Size of Data
video_game_df.count()
#Clean DataFrame to match tables
from pyspark.sql.functions import to_date
review_id_df = video_game_df.select(['review_id', 'customer_id', 'product_id', 'product_parent', to_date('review_date', 'yyyy-MM-dd').alias('review_date')])
review_id_df.show()
products_df = video_game_df.select(['product_id', 'product_title']).drop_duplicates()
products_df.show()
reviews_df = video_game_df.select(['review_id', 'review_headline', 'review_body'])
reviews_df.show(10)
customers_df = video_game_df.groupBy('customer_id').agg({'customer_id': 'count'}).withColumnRenamed('count(customer_id)', 'customer_count')
customers_df.show()
vine_df = video_game_df.select(['review_id', 'star_rating', 'helpful_votes', 'total_votes', 'vine'])
vine_df.show(10)
#Schema
'''CREATE TABLE review_id_table (
  review_id TEXT,
customer_id TEXT,
product_id TEXT,
product_parent TEXT,
review_date DATE
);
CREATE TABLE product (
product_id TEXT,
product_title TEXT
);
CREATE TABLE customers (
  customer_id INT,
  customer_count INT
);
CREATE TABLE vine_table (
review_id TEXT,
star_rating INT,
helpful_votes INT,
total_votes INT,
vine TEXT
);
'''
#Push to AWS RDS instance
mode = 'append'
jdbc_url = ''
config = {'user':'', 'password': '', 'driver': 'org.postgresql.Driver'}
#write review_id to table in RDS
review_id_df.write.jdbc(url=jdbc_url, table='review_id_table', mode=mode, properties=config)
```
## Age-structured SIR model for India
We can now generalise the model in example-2 to more than two age groups. Assume that the population has been partitioned into $i=1,\ldots, M$ age groups and that we have available the $M\times M$ contact matrix $C_{ij}$. These are a sum of contributions from contacts at home, workplace, schools and all other public spheres. Using superscripts $H$, $W$, $S$ and $O$ for each of these, we write the contact matrix as
$$
C_{ij} = C^H_{ij} + C^W_{ij} + C^S_{ij} + C^O_{ij}
$$
We read in these contact matrices from the data sets provided in the paper *Projecting social contact matrices in 152 countries using contact surveys and demographic data* by Prem et al and sum them to obtain the total contact matrix. We also read in the age distribution of India obtained from the *Population pyramid* website. The infection parameter $\beta$ is unknown, so we fit it to the case data up to 25th March. The hope is that this will account for the fact that there are, in reality, asymptomatic infectives, and that the symptomatic cases are an underestimate. Also, the model is being applied to the whole of India, where the well-mixedness of the population, implicit in a non-spatial model, breaks down. This type of fitting gets more accurate, though, when it is applied to more local regions, where well-mixedness is a better assumption. We then run the simulation, assuming all initial cases are symptomatic and remain so. This will be an underestimate in the initial stages, but once the epidemic enters the non-linear phase there will be much less sensitivity to the initial condition. At the end of the simulation, we extract the number of susceptibles remaining in each age group; the difference from the initial number of susceptibles is the total number infected. We multiply this by mortality data from China to obtain mortality estimates.
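The value of $\beta$ used below is simply stated; one way such a value *could* be obtained (a rough sketch, not the authors' actual procedure, reusing the names defined in the code cell that follows) is a least-squares fit of the simulated symptomatic infectives to the reported case counts.
```
# assumes M, Ni, S_0, Ia_0, Is_0, contactMatrix, Tf, Nf and the `cases` array
# are defined as in the cell below
from scipy.optimize import minimize_scalar

def squared_error(beta_trial):
    params = {'alpha': 0., 'beta': beta_trial, 'gIa': 1./7, 'gIs': 1./7, 'fsa': 1}
    sim = pyross.models.SIR(params, M, Ni).simulate(S_0, Ia_0, Is_0, contactMatrix, Tf, Nf)
    Is_total = np.zeros(Nf)
    for i in range(M):                      # total symptomatic infectives over time
        Is_total += np.array(sim['X'][:, 2*M + i]).flatten()
    model_daily = np.interp(np.arange(len(cases)), sim['t'], Is_total)
    return np.sum((model_daily - cases)**2)

fit = minimize_scalar(squared_error, bounds=(0.005, 0.05), method='bounded')
# fit.x would then be used as beta
```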
```
%%capture
## compile PyRoss for this notebook
import os
owd = os.getcwd()
os.chdir('../')
%run setup.py install
os.chdir(owd)
%matplotlib inline
import numpy as np
import pyross
import pandas as pd
import matplotlib.pyplot as plt
M=16 # number of age groups
# load age structure data
my_data = np.genfromtxt('data/age_structures/India-2019.csv', delimiter=',', skip_header=1)
aM, aF = my_data[:, 1], my_data[:, 2]
# set age groups
Ni=aM+aF; Ni=Ni[0:M]; N=np.sum(Ni)
# contact matrices
my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_home_1.xlsx', sheet_name='India',index_col=None)
CH = np.array(my_data)
my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_work_1.xlsx', sheet_name='India',index_col=None)
CW = np.array(my_data)
my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_school_1.xlsx', sheet_name='India',index_col=None)
CS = np.array(my_data)
my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_other_locations_1.xlsx', sheet_name='India',index_col=None)
CO = np.array(my_data)
my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_all_locations_1.xlsx', sheet_name='India',index_col=None)
CA = np.array(my_data)
# matrix of total contacts
C=CH+CW+CS+CO
beta = 0.01646692 # infection rate
gIa = 1./7 # recovery rate of asymptomatic infectives
gIs = 1./7 # recovery rate of symptomatic infectives
alpha = 0. # fraction of asymptomatic infectives
fsa = 1 # the self-isolation parameter
# initial conditions
Is_0 = np.zeros((M)); Is_0[6:13]=3; Is_0[2:6]=1
Ia_0 = np.zeros((M))
R_0 = np.zeros((M))
S_0 = Ni - (Ia_0 + Is_0 + R_0)
# matrix for linearised dynamics
L0 = np.zeros((M, M))
L = np.zeros((2*M, 2*M))
for i in range(M):
for j in range(M):
L0[i,j]=C[i,j]*Ni[i]/Ni[j]
L[0:M, 0:M] = alpha*beta/gIs*L0
L[0:M, M:2*M] = fsa*alpha*beta/gIs*L0
L[M:2*M, 0:M] = ((1-alpha)*beta/gIs)*L0
L[M:2*M, M:2*M] = fsa*((1-alpha)*beta/gIs)*L0
r0 = np.max(np.linalg.eigvals(L))
print("The basic reproductive ratio for these parameters is", r0)
# duration of simulation and data file
Tf=21*2-1; Nf=2000;
# the contact structure is independent of time
def contactMatrix(t):
return C
# intantiate model
parameters = {'alpha':alpha,'beta':beta, 'gIa':gIa,'gIs':gIs,'fsa':fsa}
model = pyross.models.SIR(parameters, M, Ni)
# run model
data=model.simulate(S_0, Ia_0, Is_0, contactMatrix, Tf, Nf)
t = data['t']; IC = np.zeros((Nf))
for i in range(M):
IC += data['X'][:,2*M+i]
fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})
plt.plot(t, IC, '-', lw=4, color='#A60628', label='forecast', alpha=0.8)
plt.xticks(np.arange(0, Tf, 10), ('4 Mar', '18 Mar', '24 Mar', '3 Apr', '13 Apr', '23 Apr'));
my_data = np.genfromtxt('data/covid-cases/india.txt', delimiter='', skip_header=6)
day, cases = my_data[:,0], my_data[:,3] - my_data[:,1]
plt.plot(cases, 'o-', lw=4, color='#348ABD', ms=16, label='data', alpha=0.5)
plt.legend(fontsize=26, loc='upper left'); plt.grid()
plt.autoscale(enable=True, axis='x', tight=True)
plt.ylabel('Infected individuals'); plt.xlim(0, 40); plt.ylim(0, 9999);
#plt.savefig('/Users/rsingh/Desktop/2a.png', format='png', dpi=212)
cases
C=CH+CW+CS+CO
Tf=200;
# matrix for linearised dynamics
L0 = np.zeros((M, M))
L = np.zeros((2*M, 2*M))
for i in range(M):
for j in range(M):
L0[i,j]=C[i,j]*Ni[i]/Ni[j]
L[0:M, 0:M] = alpha*beta/gIs*L0
L[0:M, M:2*M] = fsa*alpha*beta/gIs*L0
L[M:2*M, 0:M] = ((1-alpha)*beta/gIs)*L0
L[M:2*M, M:2*M] = fsa*((1-alpha)*beta/gIs)*L0
r0 = np.max(np.linalg.eigvals(L))
print("The basic reproductive ratio for these parameters is", r0)
def contactMatrix(t):
return C
# start simulation
Nf=2000;
data=model.simulate(S_0, Ia_0, Is_0, contactMatrix, Tf, Nf)
t = data.get('t'); IC = np.zeros((Nf)); SC = np.zeros((Nf))
for i in range(M):
SC += data.get('X')[:,0*M+i]
IC += data.get('X')[:,2*M+i]
fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})
plt.plot(t, SC*10**(-6), '-', lw=4, color='#348ABD', label='susceptible', alpha=0.8,)
plt.fill_between(t, 0, SC*10**(-6), color="#348ABD", alpha=0.3)
plt.plot(t, IC*10**(-6), '-', lw=4, color='#A60628', label='infected', alpha=0.8)
plt.fill_between(t, 0, IC*10**(-6), color="#A60628", alpha=0.3)
my_data = np.genfromtxt('data/covid-cases/india.txt', delimiter='', skip_header=6)
day, cases = my_data[:,0], my_data[:,3] - my_data[:,1]
plt.plot(cases*10**(-6), 'ro-', lw=4, color='dimgrey', ms=16, label='data', alpha=0.5)
plt.legend(fontsize=26); plt.grid()
plt.autoscale(enable=True, axis='x', tight=True)
plt.ylabel('Individuals (millions)')
plt.plot(t*0+t[np.argsort(IC)[-1]], -170+.4*SC*10**(-6), lw=4, color='g', alpha=0.8)
plt.xticks(np.arange(0, 200, 30), ('4 Mar', '3 Apr', '3 May', '2 Jun', '2 Jul', '1 Aug', '31 Aug'));
#plt.savefig('/Users/rsingh/Desktop/2b.png', format='png', dpi=212)
IC[np.argsort(IC)[-1]]
IC
cases
# matrix for linearised dynamics
L0 = np.zeros((M, M))
L = np.zeros((2*M, 2*M))
xind=[np.argsort(IC)[-1]]
rr = np.zeros((Tf))
for tt in range(Tf):
Si = np.array((data['X'][tt*10,0:M])).flatten()
for i in range(M):
for j in range(M):
L0[i,j]=C[i,j]*Si[i]/Ni[j]
L[0:M, 0:M] = alpha*beta/gIs*L0
L[0:M, M:2*M] = fsa*alpha*beta/gIs*L0
L[M:2*M, 0:M] = ((1-alpha)*beta/gIs)*L0
L[M:2*M, M:2*M] = fsa*((1-alpha)*beta/gIs)*L0
rr[tt] = np.real(np.max(np.linalg.eigvals(L)))
fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})
plt.plot(t[::10], rr, 'o', lw=4, color='#A60628', label='reproductive ratio', alpha=0.8,)
plt.fill_between(t, 0, t*0+1, color="dimgrey", alpha=0.2); plt.ylabel('Basic reproductive ratio')
plt.ylim(np.min(rr)-.1, np.max(rr)+.1)
plt.xticks(np.arange(0, 200, 30), ('4 Mar', '3 Apr', '3 May', '2 Jun', '2 Jul', '1 Aug', '31 Aug'));
#plt.savefig('/Users/rsingh/Desktop/test.png', format='png', dpi=212)
fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})
plt.bar(np.arange(16),data.get('X')[0,0:M]*10**(-6), label='susceptible (initial)', alpha=0.8)
plt.bar(np.arange(16),data.get('X')[-1,0:M]*10**(-6), label='susceptible (final)', alpha=0.8)
plt.xticks(np.arange(-0.4, 16.45, 3.95), ('0', '20', '40', '60', '80'));
plt.xlim(-0.45, 15.45); plt.ylabel('Individuals (millions)'); plt.xlabel('Age')
plt.legend(fontsize=22); plt.axis('tight')
plt.autoscale(enable=True, axis='x', tight=True)
#plt.savefig('/Users/rsingh/Desktop/3a.png', format='png', dpi=212)
MM = np.array((0,0,.2,.2,.2,.2,.2,.2,.4,.4,1.3,1.3,3.6,3.6,8,8))
## Fatality Rate by AGE per 100 cases
fig = plt.figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.rcParams.update({'font.size': 22})
m1 = .01*MM*(data.get('X')[0,0:M]-data['X'][-1,0:M])
plt.bar(np.arange(16),m1*10**(-6), label='susceptible (final)', alpha=0.8)
plt.axis('tight'); plt.xticks(np.arange(-0.4, 16.45, 3.95), ('0', '20', '40', '60', '80'));
plt.xlim(-0.45, 15.45); plt.ylabel('Mortality (millions)'); plt.xlabel('Age')
plt.autoscale(enable=True, axis='x', tight=True)
#plt.savefig('/Users/rsingh/Desktop/3b.png', format='png', dpi=212)
m1 = .01*MM*(data['X'][0,0:M]-data['X'][-1,0:M])
np.sum(m1)
SC[0]-SC[-1]
900*10**7
```
```
!wget -O loan_train.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_train.csv
import itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import pandas as pd
import numpy as np
import matplotlib.ticker as ticker
from sklearn import preprocessing
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import f1_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
%matplotlib inline
df = pd.read_csv('loan_train.csv')
df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True)
df['loan_status'].replace(to_replace=['COLLECTION','PAIDOFF'], value=[0,1],inplace=True)
df['due_date'] = pd.to_datetime(df['due_date'])
df['effective_date'] = pd.to_datetime(df['effective_date'])
df['dayofweek'] = df['effective_date'].dt.dayofweek
df['weekend'] = df['dayofweek'].apply(lambda x: 1 if (x>3) else 0)
Feature = df[['Principal','terms','age','Gender','weekend']]
Feature = pd.concat([Feature,pd.get_dummies(df['education'])], axis=1)
Feature.drop(['Master or Above'], axis = 1,inplace=True)
X = Feature
y = df['loan_status'].values
X = preprocessing.StandardScaler().fit(X).transform(X)
X_trainset, X_testset, y_trainset, y_testset = train_test_split(X, y, test_size=0.3, random_state=3)
defaultTree = DecisionTreeClassifier(criterion="entropy", max_depth = 4)
defaultTree.fit(X_trainset,y_trainset)
predTree = defaultTree.predict(X_testset)
print (predTree [0:5])
print (y_testset [0:5])
from sklearn import metrics
import matplotlib.pyplot as plt
print("DecisionTrees's Accuracy: ", metrics.accuracy_score(y_testset, predTree))
!wget -O loan_test.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_test.csv
test_df = pd.read_csv('loan_test.csv')
test_df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True)
test_df['loan_status'].replace(to_replace=['COLLECTION','PAIDOFF'], value=[0,1],inplace=True)
test_df['due_date'] = pd.to_datetime(test_df['due_date'])
test_df['effective_date'] = pd.to_datetime(test_df['effective_date'])
test_df['dayofweek'] = test_df['effective_date'].dt.dayofweek
test_df['weekend'] = test_df['dayofweek'].apply(lambda x: 1 if (x>3) else 0)
test_Feature = test_df[['Principal','terms','age','Gender','weekend']]
test_Feature = pd.concat([test_Feature,pd.get_dummies(test_df['education'])], axis=1)
test_Feature.drop(['Master or Above'], axis = 1,inplace=True)
test_X = test_Feature
test_y = test_df['loan_status'].values
test_X = preprocessing.StandardScaler().fit(test_X).transform(test_X)
test_yhat = defaultTree.predict(test_X)
jaccard_similarity_score(test_yhat, test_y)
f1_score(test_yhat, test_y, average='weighted')
```
### How does a car's suspension work?
> A first approximation to modeling a car's suspension is the damped harmonic oscillator, which is described by the following differential equation.
\begin{equation}
m\ddot{x} + k x + B \dot{x} = 0
\end{equation}
where $k$ is the spring constant and $B$ is the damping constant.
References:
- https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.odeint.html
- https://docs.scipy.org/doc/scipy/reference/index.html
___
<div>
<img style="float: left; margin: 0px 0px 15px 0px;" src="https://upload.wikimedia.org/wikipedia/commons/thumb/c/ce/Packard_wishbone_front_suspension_%28Autocar_Handbook%2C_13th_ed%2C_1935%29.jpg/414px-Packard_wishbone_front_suspension_%28Autocar_Handbook%2C_13th_ed%2C_1935%29.jpg" width="150px" height="50px" />
<img style="float: center; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/d/df/Radaufhängung_Renault.JPG" width="150px" height="100px" />
</div>
This is an ordinary differential equation (ODE). In Python, the function _odeint_ from the _integrate_ package of the _scipy_ library can integrate systems of the form
\begin{equation}
\frac{dy}{dt} = f(x,y)
\end{equation}
with initial conditions $y(0) = y_{0}$. Note that our differential equation is second order, but that is not a problem: we can rewrite it as a system of first-order equations as follows:
\begin{align}
\dot{x} & = y \\
\dot{y} & = -\frac{k}{m} x - \frac{B}{m} y
\end{align}
```
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
%matplotlib inline
k = 3.0 # Constante del muelle
m = 1.0 # Masa
B = .5 # Constante de amortiguación
def armonico(variables, t):
x, y = variables
return [y, -k * x / m - B / m * y]
inicial = [0.6, 0.4] # Vector de posición inicial y velocidad inicial
# condiciones iniciales x(t=0)=0.6 [m] y(t=0)=0.4 [m/s]
#tiempo = np.linspace(0, 15) # Dominio temporal de 0 a 15
tiempo = np.arange(0, 20, .01)
resultado = odeint(armonico, inicial, tiempo)
#El sistema se resuelve con
#odeint(sistema, condiciones iniciales, rango donde graficaremos)
xx, yy = resultado.T # extraer posición y velocidad.
import matplotlib as mpl
label_size = 14
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
plt.plot(tiempo, xx, c = 'r', label="Posicion")
plt.plot(tiempo, yy, c = 'k', label="Velocidad")
plt.legend(loc = 'best', prop={'size': 14})
plt.xlabel('tiempo', fontsize = 14)
plt.show()
omega0 = k/m
plt.figure(figsize = (6,6))
plt.scatter(xx, yy/omega0, lw = 0, s = 3, cmap = 'viridis', c = xx)
plt.show()
from ipywidgets import *
def amortiguado(t = 0):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(tiempo, xx, 'k-')
ax.plot(tiempo[t], xx[t], 'ro')
ax.text(4, .6, 'tiempo = %s'%tiempo[t])
ax.text(4, .5, 'posición = %s'%xx[t])
fig.canvas.draw()
t_f = len(tiempo)
interact_manual(amortiguado, t = (0, t_f, 1));
```
### Cases
We had
\begin{equation}
m\ddot{x} + k x + B \dot{x} = 0
\end{equation}
Recalling that $\omega_0 ^2 = k/m$ and defining $B/m\equiv 2\Gamma$, we obtain
\begin{equation}
\ddot{x} + 2\Gamma \dot{x}+ \omega_0^2 x = 0
\end{equation}
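Substituting the trial solution $x(t) = e^{\lambda t}$ gives the characteristic equation
\begin{equation}
\lambda^2 + 2\Gamma \lambda + \omega_0^2 = 0, \qquad \lambda_\pm = -\Gamma \pm \sqrt{\Gamma^2 - \omega_0^2},
\end{equation}
so the sign of $\Gamma^2 - \omega_0^2$ distinguishes the three cases below.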
### Underdamped
If $\omega_0^2 > \Gamma^2$, the motion is damped oscillatory (underdamped).
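Here the roots are complex, $\lambda_\pm = -\Gamma \pm i\omega_1$ with $\omega_1 = \sqrt{\omega_0^2 - \Gamma^2}$, so the general solution (with $C_1$, $C_2$ fixed by the initial conditions) is
\begin{equation}
x(t) = e^{-\Gamma t}\left(C_1 \cos\omega_1 t + C_2 \sin\omega_1 t\right).
\end{equation}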
```
omega0 = k/m
Gamma = B/(2*m)
omega0**2, Gamma**2
omega0**2 > Gamma**2
```
So the first case, which we already presented above, corresponds to underdamped motion.
```
plt.plot(tiempo, xx, c = 'r', label="Posicion")
plt.plot(tiempo, yy, c = 'k', label="Velocidad")
plt.legend(loc = 'best', prop={'size': 14})
plt.xlabel('tiempo', fontsize = 14)
plt.show()
```
### Overdamped
If $\omega_0^2 < \Gamma^2$, the motion is aperiodic (overdamped): the system returns to equilibrium without oscillating.
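Here both roots $\lambda_\pm = -\Gamma \pm \sqrt{\Gamma^2 - \omega_0^2}$ are real and negative, so the general solution is a sum of two decaying exponentials:
\begin{equation}
x(t) = C_1 e^{\lambda_+ t} + C_2 e^{\lambda_- t}.
\end{equation}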
```
k = .1 # Constante del muelle
m = 1.0 # Masa
B = .5 # Constante de amortiguación
omega0 = k/m
Gamma = B/(2*m)
omega0**2, Gamma**2
omega0**2 < Gamma**2
inicial = [0.6, 0.4]
tiempo = np.arange(0, 20, .01)
resultado = odeint(armonico, inicial, tiempo)
xxA, yyA = resultado.T # extraer posición y velocidad.
plt.plot(tiempo, xxA, c = 'r', label="Posicion")
plt.plot(tiempo, yyA, c = 'k', label="Velocidad")
plt.legend(loc = 'best', prop={'size': 14})
plt.xlabel('tiempo', fontsize = 14)
plt.show()
```
### Critical damping
If $\omega_0^2 = \Gamma^2$, the motion is critically damped: the system returns to equilibrium as quickly as possible without oscillating.
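Here the characteristic equation has the double root $\lambda = -\Gamma$, and the general solution is
\begin{equation}
x(t) = \left(C_1 + C_2\, t\right) e^{-\Gamma t}.
\end{equation}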
```
k = np.sqrt(.0625) # Constante del muelle
m = 1.0 # Masa
B = .5 # Constante de amortiguación
omega0 = k/m
Gamma = B/(2*m)
omega0**2, Gamma**2
omega0**2 == Gamma**2
inicial = [0.6, 0.4]
tiempo = np.arange(0, 20, .01)
resultado = odeint(armonico, inicial, tiempo)
xxC, yyC = resultado.T # extraer posición y velocidad.
plt.plot(tiempo, xxC, c = 'r', label="Posicion")
plt.plot(tiempo, yyC, c = 'k', label="Velocidad")
plt.legend(loc = 'best',prop={'size': 14})
plt.xlabel('tiempo', fontsize = 14)
plt.show()
```
In summary, we then have:
```
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2, 3, sharex='col',
sharey='row',figsize =(10,6))
ax1.plot(tiempo, xx, c = 'k')
ax1.set_title('Amortiguado', fontsize = 14)
ax1.set_ylabel('Posición', fontsize = 14)
ax2.plot(tiempo, xxA, c = 'b')
ax2.set_title('Sobreamortiguado', fontsize = 14)
ax3.plot(tiempo, xxC, c = 'r')
ax3.set_title('Crítico', fontsize = 16)
ax4.plot(tiempo, yy, c = 'k')
ax4.set_ylabel('Velocidad', fontsize = 14)
ax4.set_xlabel('tiempo', fontsize = 14)
ax5.plot(tiempo, yyA, c = 'b')
ax5.set_xlabel('tiempo', fontsize = 14)
ax6.plot(tiempo, yyC, c = 'r')
ax6.set_xlabel('tiempo', fontsize = 14)
plt.show()
```
> **Activity**. What does the phase space look like for the different cases and for different initial conditions?
<footer id="attribution" style="float:right; color:#808080; background:#fff;">
Created with Jupyter by Lázaro Alonso.
</footer>
|
github_jupyter
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
%matplotlib inline
k = 3.0 # Constante del muelle
m = 1.0 # Masa
B = .5 # Constante de amortiguación
def armonico(variables, t):
x, y = variables
return [y, -k * x / m - B / m * y]
inicial = [0.6, 0.4] # Vector de posición inicial y velocidad inicial
# condiciones iniciales x(t=0)=0.6 [m] y(t=0)=0.4 [m/s]
#tiempo = np.linspace(0, 15) # Dominio temporal de 0 a 15
tiempo = np.arange(0, 20, .01)
resultado = odeint(armonico, inicial, tiempo)
#El sistema se resuelve con
#odeint(sistema, condiciones iniciales, rango donde graficaremos)
xx, yy = resultado.T # extraer posición y velocidad.
import matplotlib as mpl
label_size = 14
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
plt.plot(tiempo, xx, c = 'r', label="Posicion")
plt.plot(tiempo, yy, c = 'k', label="Velocidad")
plt.legend(loc = 'best', prop={'size': 14})
plt.xlabel('tiempo', fontsize = 14)
plt.show()
omega0 = k/m
plt.figure(figsize = (6,6))
plt.scatter(xx, yy/omega0, lw = 0, s = 3, cmap = 'viridis', c = xx)
plt.show()
from ipywidgets import *
def amortiguado(t = 0):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(tiempo, xx, 'k-')
ax.plot(tiempo[t], xx[t], 'ro')
ax.text(4, .6, 'tiempo = %s'%tiempo[t])
ax.text(4, .5, 'posición = %s'%xx[t])
fig.canvas.draw()
t_f = len(tiempo)
interact_manual(amortiguado, t = (0, t_f, 1));
omega0 = k/m
Gamma = B/(2*m)
omega0**2, Gamma**2
omega0**2 > Gamma**2
plt.plot(tiempo, xx, c = 'r', label="Posicion")
plt.plot(tiempo, yy, c = 'k', label="Velocidad")
plt.legend(loc = 'best', prop={'size': 14})
plt.xlabel('tiempo', fontsize = 14)
plt.show()
k = .1 # Constante del muelle
m = 1.0 # Masa
B = .5 # Constante de amortiguación
omega0 = k/m
Gamma = B/(2*m)
omega0**2, Gamma**2
omega0**2 < Gamma**2
inicial = [0.6, 0.4]
tiempo = np.arange(0, 20, .01)
resultado = odeint(armonico, inicial, tiempo)
xxA, yyA = resultado.T # extraer posición y velocidad.
plt.plot(tiempo, xxA, c = 'r', label="Posicion")
plt.plot(tiempo, yyA, c = 'k', label="Velocidad")
plt.legend(loc = 'best', prop={'size': 14})
plt.xlabel('tiempo', fontsize = 14)
plt.show()
k = np.sqrt(.0625) # Constante del muelle
m = 1.0 # Masa
B = .5 # Constante de amortiguación
omega0 = k/m
Gamma = B/(2*m)
omega0**2, Gamma**2
omega0**2 == Gamma**2
inicial = [0.6, 0.4]
tiempo = np.arange(0, 20, .01)
resultado = odeint(armonico, inicial, tiempo)
xxC, yyC = resultado.T # extraer posición y velocidad.
plt.plot(tiempo, xxC, c = 'r', label="Posicion")
plt.plot(tiempo, yyC, c = 'k', label="Velocidad")
plt.legend(loc = 'best',prop={'size': 14})
plt.xlabel('tiempo', fontsize = 14)
plt.show()
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2, 3, sharex='col',
sharey='row',figsize =(10,6))
ax1.plot(tiempo, xx, c = 'k')
ax1.set_title('Amortiguado', fontsize = 14)
ax1.set_ylabel('Posición', fontsize = 14)
ax2.plot(tiempo, xxA, c = 'b')
ax2.set_title('Sobreamortiguado', fontsize = 14)
ax3.plot(tiempo, xxC, c = 'r')
ax3.set_title('Crítico', fontsize = 16)
ax4.plot(tiempo, yy, c = 'k')
ax4.set_ylabel('Velocidad', fontsize = 14)
ax4.set_xlabel('tiempo', fontsize = 14)
ax5.plot(tiempo, yyA, c = 'b')
ax5.set_xlabel('tiempo', fontsize = 14)
ax6.plot(tiempo, yyC, c = 'r')
ax6.set_xlabel('tiempo', fontsize = 14)
plt.show()
| 0.335569 | 0.919751 |
# Computational Assignment 1
**Assigned Tuesday, 1-22-19.** **Due Tuesday, 1-29-19.**
Congratulations on installing the Jupyter Notebook! Welcome to your first computational assignment!
Beyond using this as a tool to understand physical chemistry, Python and notebooks are widely used in scientific analysis. Big data analysis especially relies on Python notebooks.
## Introduction to the notebook
If you double-click on the text above, you will notice the look suddenly changes. Every section of the notebook, including the introductory text, is technically a code entry. The text is written in a typesetting language called **Markdown**. To learn more see https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet
To run a code entry in the notebook, select the section you want to run and type
`shift+enter`
If you want to make notes on a notebook, you can press the plus sign in the toolbar above to create a new entry. Then make sure to switch the menu in the toolbar from **Code** to **Markdown**.
We can also run calculations this way.
In the entry below, I have typed
`123+3483`
Select the entry and type `shift+enter`
```
123+3483
```
Once you run an entry, the output is displayed on the screen, when applicable.
Now try some arithmetic yourself in the blank entry below.
(Don't forget to hit `shift+enter` to run your calculation!)
```
123+3483
```
## Introduction to programming and python
Python is a very powerful and intuitive modern programming language. It is easier to learn than many other languages. Because of the wide availability of libraries such as **numpy** and **scipy** (among many others), it is very useful for scientific calculations.
In this section, we will cover some very basic concepts. I am assuming that nearly everyone has little or no previous programming experience, which is common for most chemistry and biology students.
We will slowly build up to the skills we need to run complex calculations!
### Our first python code: "Hello World!"
The first thing we usually learn how to do is print a simple message to the output. Run the following entry.
```
print("Hello World!")
```
`print` is a function that takes in text as an argument and outputs that text.
A slightly more complicated example. Run the following entry.
```
# This is a comment in python
# Set a variable
x = 1 + 7
# print the result of the variable
print(x)
```
The lines that begin with "#" are comments. They are not read by the notebook and do not affect the code. They are very useful for making your code human-readable.
This snippet of code assigned the result of `1+7` to the variable `x` and then used `print` to output that value.
## Loops
One of the benefits of the computer is that it can run a calculation many times without having to manually type each line. The way that we do this is to use a **loop**.
```
# This is an example of a loop
# The colon is required on the first line
for i in (1,2,3):
# This indentation is required for loops
print ("Hello World, iteration",i)
```
### Explanation
1. The command `for` tells the code that this is a loop
2. The variable `i` is the counting variable. Every time the loop runs, it sequentially takes on a different value from the list
3. The `(1,2,3)` is the list of values the loop iterates over (a further example follows below)
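As a further illustration, the loop variable can take its values from any list, not just numbers:
```
# The loop variable takes each value in the list, one value per iteration
for name in ("alpha", "beta", "gamma"):
    print("Hello,", name)
```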
Sometimes we need to run a loop many times or iterate over a large list of numbers. For this, the `range` command is useful.
```
# The command range(a,b) creates a list of numbers from a to b
for i in range(-3,3):
print ("Hello World, iteration",i)
```
Note that the `range(a,b)` command makes a list that spans from `a` to `b-1`.
In the example above, `range(-3,3)` makes a list that goes from -3 to 2.
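For example, converting the range to a list in a new cell shows the endpoints explicitly:
```
# range(a, b) starts at a and stops at b-1
print(list(range(-3, 3)))   # [-3, -2, -1, 0, 1, 2]
```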
## Conditional Statements: IF
Many times we want the computer to do something after analyzing a logical statement. **If this is true, then do that**. These are called conditional statements
```
# Conditional example
a = 100
if (a>0):
#Like in the loop example, the indentation defines what happens in this
# block of the if statement
print("the number is positive")
elif (a<0):
print("the number is negative")
elif (a==0):
print("the number is zero")
```
Now we can try it again with a different value for `a`
```
# Conditional example again
a = -1234
if (a>0):
print("the number is positive")
elif (a<0):
print("the number is negative")
elif (a==0):
print("the number is zero")
```
One more time
```
# Conditional example again
a = 0
if (a>0):
print("the number is positive")
elif (a<0):
print("the number is negative")
elif (a==0):
print("the number is zero")
```
## Bringing it all together
These can all be combined to perform complicated actions. Note the indentation and colons. They matter.
### Combined Example
```
# A loop with an if statement
for i in range(-1,2):
print("Iteration",i)
if (i==0):
print("zero!")
```
# Exercise
Following the examples above, write a code snippet that uses the `range` command to scan from -10 to 10 and print whether the number is positive, negative, or zero.
**To turn this in, print this notebook and please make sure that your name is written on it.**
```
# Xin Yao Ren
for i in range(-10,11):
if (i>0):
print ("The number is positive")
elif (i<0):
print ("The number is negative")
elif (i ==0):
print ("The number is zero")
```
|
github_jupyter
|
123+3483
123+3483
print("Hello World!")
# This is a comment in python
# Set a variable
x = 1 + 7
# print the result of the variable
print(x)
# This is an example of a loop
# The colon is required on the first line
for i in (1,2,3):
# This indentation is required for loops
print ("Hello World, iteration",i)
# The command range(a,b) creates a list of numbers from a to b
for i in range(-3,3):
print ("Hello World, iteration",i)
# Conditional example
a = 100
if (a>0):
#Like in the loop example, the indentation defines what happens in this
# block of the if statement
print("the number is positive")
elif (a<0):
print("the number is negative")
elif (a==0):
print("the number is zero")
# Conditional example again
a = -1234
if (a>0):
print("the number is positive")
elif (a<0):
print("the number is negative")
elif (a==0):
print("the number is zero")
# Conditional example again
a = 0
if (a>0):
print("the number is positive")
elif (a<0):
print("the number is negative")
elif (a==0):
print("the number is zero")
# A loop with an if statement
for i in range(-1,2):
print("Iteration",i)
if (i==0):
print("zero!")
# Xin Yao Ren
for i in range(-10,11):
if (i>0):
print ("The number is positive")
elif (i<0):
print ("The number is negative")
elif (i ==0):
print ("The number is zero")
| 0.13109 | 0.987981 |
```
%load_ext autoreload
%autoreload 2
import numpy as np
import cvxpy as cp
import polytope as pc
import matplotlib.pyplot as plt
from evanqp import CvxpyProblem, Polytope, Verifier
from utils import dlqr
class DoubleIntegrator(CvxpyProblem):
def __init__(self, N=10):
self.N = N
n = 2
m = 1
# Double Integrator
self.A = np.array([[1.0, 1.0], [0.5, 1.0]])
self.B = np.array([[0.0], [1.0]])
# Weights
self.Q = np.diag([1.0, 1.0])
self.R = np.array([[1.0]])
self.K, self.P, _ = dlqr(self.A, self.B, self.Q, self.R)
# Constraints
self.x_max = np.array([10.0, 10.0])
self.x_min = np.array([-10.0, -10.0])
self.u_max = 1.0
self.u_min = -1.0
# Terminal Set computation
# state constraints
Hx = np.vstack((np.eye(n), -np.eye(n)))
hx = np.concatenate((self.x_max, -self.x_min))
# input constraints
Hu = np.vstack((np.eye(m), -np.eye(m)))
hu = np.array([self.u_max, -self.u_min])
# closed loop dynamics
Ak = self.A - self.B @ self.K
# state & input constraints
HH = np.vstack((Hx, -Hu @ self.K))
hh = np.concatenate((hx, hu))
# compute maximal invariant set
O = pc.Polytope(HH, hh)
while True:
O_prev = O
# pre-set
O = O.intersect(pc.Polytope(O.A @ Ak, O.b))
if O == O_prev:
break
self.F, self.f = O.A, O.b
self.x0 = cp.Parameter(n, name='x0')
self.xN = cp.Parameter(n, name='xN')
self.x = cp.Variable((N + 1, n), name='x')
self.u = cp.Variable((N, m), name='u')
objective = cp.quad_form(self.x[N, :], self.P)
constraints = [self.x0 == self.x[0, :], self.xN == self.x[N, :]]
for i in range(N):
objective += cp.quad_form(self.x[i, :], self.Q) + cp.quad_form(self.u[i, :], self.R)
constraints += [self.x[i + 1, :] == self.A @ self.x[i, :] + self.B @ self.u[i, :]]
constraints += [self.x_min <= self.x[i, :], self.x[i, :] <= self.x_max]
constraints += [self.u_min <= self.u[i, :], self.u[i, :] <= self.u_max]
# constraints += [self.F @ self.x[N, :] <= self.f]
self.objective = cp.Minimize(objective)
self._problem = cp.Problem(self.objective, constraints)
def problem(self):
return self._problem
def parameters(self):
return [self.x0]
def variables(self):
return [self.xN]
def solve(self, x0):
self.x0.value = x0
self._problem.solve(solver=cp.GUROBI)
        solution = {self.u: self.u.value,
self.x: self.x.value,
self.objective: self.objective.value}
return solution
mpc_controller = DoubleIntegrator()
parameter_set = Polytope(np.array([[1, 0], [-1, 0], [0, 1], [0, -1]]), np.array([10, 10, 10, 10]))
verifier = Verifier(parameter_set, mpc_controller)
poly = Polytope(mpc_controller.F, mpc_controller.f)
verifier.variables_in_polytope(poly)
poly = Polytope(mpc_controller.F, mpc_controller.f)
Verifier.min_optimal_mpc_horizon(parameter_set, lambda N: DoubleIntegrator(N=N), poly)
```
|
github_jupyter
|
%load_ext autoreload
%autoreload 2
import numpy as np
import cvxpy as cp
import polytope as pc
import matplotlib.pyplot as plt
from evanqp import CvxpyProblem, Polytope, Verifier
from utils import dlqr
class DoubleIntegrator(CvxpyProblem):
def __init__(self, N=10):
self.N = N
n = 2
m = 1
# Double Integrator
self.A = np.array([[1.0, 1.0], [0.5, 1.0]])
self.B = np.array([[0.0], [1.0]])
# Weights
self.Q = np.diag([1.0, 1.0])
self.R = np.array([[1.0]])
self.K, self.P, _ = dlqr(self.A, self.B, self.Q, self.R)
# Constraints
self.x_max = np.array([10.0, 10.0])
self.x_min = np.array([-10.0, -10.0])
self.u_max = 1.0
self.u_min = -1.0
# Terminal Set computation
# state constraints
Hx = np.vstack((np.eye(n), -np.eye(n)))
hx = np.concatenate((self.x_max, -self.x_min))
# input constraints
Hu = np.vstack((np.eye(m), -np.eye(m)))
hu = np.array([self.u_max, -self.u_min])
# closed loop dynamics
Ak = self.A - self.B @ self.K
# state & input constraints
HH = np.vstack((Hx, -Hu @ self.K))
hh = np.concatenate((hx, hu))
# compute maximal invariant set
O = pc.Polytope(HH, hh)
while True:
O_prev = O
# pre-set
O = O.intersect(pc.Polytope(O.A @ Ak, O.b))
if O == O_prev:
break
self.F, self.f = O.A, O.b
self.x0 = cp.Parameter(n, name='x0')
self.xN = cp.Parameter(n, name='xN')
self.x = cp.Variable((N + 1, n), name='x')
self.u = cp.Variable((N, m), name='u')
objective = cp.quad_form(self.x[N, :], self.P)
constraints = [self.x0 == self.x[0, :], self.xN == self.x[N, :]]
for i in range(N):
objective += cp.quad_form(self.x[i, :], self.Q) + cp.quad_form(self.u[i, :], self.R)
constraints += [self.x[i + 1, :] == self.A @ self.x[i, :] + self.B @ self.u[i, :]]
constraints += [self.x_min <= self.x[i, :], self.x[i, :] <= self.x_max]
constraints += [self.u_min <= self.u[i, :], self.u[i, :] <= self.u_max]
# constraints += [self.F @ self.x[N, :] <= self.f]
self.objective = cp.Minimize(objective)
self._problem = cp.Problem(self.objective, constraints)
def problem(self):
return self._problem
def parameters(self):
return [self.x0]
def variables(self):
return [self.xN]
def solve(self, x0):
self.x0.value = x0
self._problem.solve(solver=cp.GUROBI)
        solution = {self.u: self.u.value,
self.x: self.x.value,
self.objective: self.objective.value}
return solution
mpc_controller = DoubleIntegrator()
parameter_set = Polytope(np.array([[1, 0], [-1, 0], [0, 1], [0, -1]]), np.array([10, 10, 10, 10]))
verifier = Verifier(parameter_set, mpc_controller)
poly = Polytope(mpc_controller.F, mpc_controller.f)
verifier.variables_in_polytope(poly)
poly = Polytope(mpc_controller.F, mpc_controller.f)
Verifier.min_optimal_mpc_horizon(parameter_set, lambda N: DoubleIntegrator(N=N), poly)
| 0.584627 | 0.49707 |
```
def start_game():
# Generateing 2-D matrix (list)
mat = []
for i in range(4):
mat.append([0]*4)
return mat
import random
def add_new_2(mat):
# Generating random position for new 2
r = random.randint(0, 3)
c = random.randint(0, 3)
while mat[r][c] != 0:
r = random.randint(0, 3)
c = random.randint(0, 3)
mat[r][c] = 2
def get_current_state(mat):
# WON
# Anywhere 2048 is present
for i in range(4):
for j in range(4):
if mat[i][j] == 2048:
return 'WON!'
# Not Over Yet
# Anywhere 0 is present
for i in range(4):
for j in range(4):
if mat[i][j] == 0:
return 'GAME NOT OVER!'
# Every row and column except last row and last column
for i in range(3):
for j in range(3):
if mat[i][j] == mat[i+1][j] or mat[i][j] == mat[i][j+1]:
return 'GAME NOT OVER!'
# Last Row
for j in range(3):
if mat[3][j] == mat[3][j+1]:
return 'GAME NOT OVER!'
# Last Column
for i in range(3):
if mat[i][3] == mat[i+1][3]:
return 'GAME NOT OVER!'
# LOST
return 'LOST!'
def compress(mat):
new_mat = []
changed = False
for i in range(4):
new_mat.append([0]*4)
for i in range(4):
pos = 0
for j in range(4):
if mat[i][j] != 0:
new_mat[i][pos] = mat[i][j]
if j != pos:
changed = True
pos += 1
return new_mat, changed
def merge(mat):
changed = False
for i in range(4):
for j in range(3):
if mat[i][j] == mat[i][j+1] and mat[i][j] != 0:
mat[i][j] *= 2
mat[i][j+1] = 0
changed = True
return mat, changed
def reverse(mat):
new_mat = []
for i in range(4):
new_mat.append([])
for j in range(4):
new_mat[i].append(mat[i][4-j-1])
return new_mat
def transpose(mat):
new_mat = []
for i in range(4):
new_mat.append([])
for j in range(4):
new_mat[i].append(mat[j][i])
return new_mat
def move_up(grid):
transposed_grid = transpose(grid)
new_grid, changed1 = compress(transposed_grid)
new_grid, changed2 = merge(new_grid)
changed = changed1 or changed2
new_grid, temp = compress(new_grid)
final_grid = transpose(new_grid)
return final_grid, changed
def move_down(grid):
transposed_grid = transpose(grid)
reverse_grid = reverse(transposed_grid)
new_grid, changed1 = compress(reverse_grid)
new_grid, changed2 = merge(new_grid)
changed = changed1 or changed2
new_grid, temp = compress(new_grid)
final_reversed_grid = reverse(new_grid)
final_grid = transpose(final_reversed_grid)
return final_grid, changed
def move_right(grid):
reverse_grid = reverse(grid)
new_grid, changed1 = compress(reverse_grid)
new_grid, changed2 = merge(new_grid)
changed = changed1 or changed2
new_grid, temp = compress(new_grid)
final_grid = reverse(new_grid)
return final_grid, changed
def move_left(grid):
new_grid, changed1 = compress(grid)
new_grid, changed2 = merge(new_grid)
changed = changed1 or changed2
new_grid, temp = compress(new_grid)
return new_grid, changed
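
# --- Illustrative usage sketch (added example; the board values here are arbitrary) ---
# A full move is compress -> merge -> compress, combined with transpose/reverse
# for the up/down/right directions.
board = start_game()
add_new_2(board)
add_new_2(board)
board, moved = move_left(board)
print(board)
print(get_current_state(board))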
```
|
github_jupyter
|
def start_game():
    # Generating a 2-D matrix (list)
mat = []
for i in range(4):
mat.append([0]*4)
return mat
import random
def add_new_2(mat):
# Generating random position for new 2
r = random.randint(0, 3)
c = random.randint(0, 3)
while mat[r][c] != 0:
r = random.randint(0, 3)
c = random.randint(0, 3)
mat[r][c] = 2
def get_current_state(mat):
# WON
# Anywhere 2048 is present
for i in range(4):
for j in range(4):
if mat[i][j] == 2048:
return 'WON!'
# Not Over Yet
# Anywhere 0 is present
for i in range(4):
for j in range(4):
if mat[i][j] == 0:
return 'GAME NOT OVER!'
# Every row and column except last row and last column
for i in range(3):
for j in range(3):
if mat[i][j] == mat[i+1][j] or mat[i][j] == mat[i][j+1]:
return 'GAME NOT OVER!'
# Last Row
for j in range(3):
if mat[3][j] == mat[3][j+1]:
return 'GAME NOT OVER!'
# Last Column
for i in range(3):
if mat[i][3] == mat[i+1][3]:
return 'GAME NOT OVER!'
# LOST
return 'LOST!'
def compress(mat):
new_mat = []
changed = False
for i in range(4):
new_mat.append([0]*4)
for i in range(4):
pos = 0
for j in range(4):
if mat[i][j] != 0:
new_mat[i][pos] = mat[i][j]
if j != pos:
changed = True
pos += 1
return new_mat, changed
def merge(mat):
changed = False
for i in range(4):
for j in range(3):
if mat[i][j] == mat[i][j+1] and mat[i][j] != 0:
mat[i][j] *= 2
mat[i][j+1] = 0
changed = True
return mat, changed
def reverse(mat):
new_mat = []
for i in range(4):
new_mat.append([])
for j in range(4):
new_mat[i].append(mat[i][4-j-1])
return new_mat
def transpose(mat):
new_mat = []
for i in range(4):
new_mat.append([])
for j in range(4):
new_mat[i].append(mat[j][i])
return new_mat
def move_up(grid):
transposed_grid = transpose(grid)
new_grid, changed1 = compress(transposed_grid)
new_grid, changed2 = merge(new_grid)
changed = changed1 or changed2
new_grid, temp = compress(new_grid)
final_grid = transpose(new_grid)
return final_grid, changed
def move_down(grid):
transposed_grid = transpose(grid)
reverse_grid = reverse(transposed_grid)
new_grid, changed1 = compress(reverse_grid)
new_grid, changed2 = merge(new_grid)
changed = changed1 or changed2
new_grid, temp = compress(new_grid)
final_reversed_grid = reverse(new_grid)
final_grid = transpose(final_reversed_grid)
return final_grid, changed
def move_right(grid):
reverse_grid = reverse(grid)
new_grid, changed1 = compress(reverse_grid)
new_grid, changed2 = merge(new_grid)
changed = changed1 or changed2
new_grid, temp = compress(new_grid)
final_grid = reverse(new_grid)
return final_grid, changed
def move_left(grid):
new_grid, changed1 = compress(grid)
new_grid, changed2 = merge(new_grid)
changed = changed1 or changed2
new_grid, temp = compress(new_grid)
return new_grid, changed
| 0.313945 | 0.449332 |
```
import warnings
warnings.simplefilter("ignore")
warnings.filterwarnings("ignore")
import joblib
import missingno
import pandas_profiling
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import scipy.stats as stats
from scipy.stats import zscore
from imblearn.over_sampling import SMOTE
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import xgboost as xgb
import lightgbm as lgb
from sklearn import metrics
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
df_train = pd.read_csv("train.csv")
df_test = pd.read_csv("test.csv")
df_train.isna().sum() # checking for missing values in training dataset
df_test.isna().sum() # checking for missing values in testing dataset
df_train.nunique().sort_values().to_frame("Unique Values")
df_test.nunique().sort_values().to_frame("Unique Values")
df_train
# Label Encoder
le = LabelEncoder()
df_train["Gender"] = le.fit_transform(df_train["Gender"])
df_train.head()
# Ordinal Encoder
oe = OrdinalEncoder()
oe_col = ["Income", "Product_Holdings", "Credit_Category"]
def ordinal_encode(df, column):
df[column] = oe.fit_transform(df[column])
return df
df=ordinal_encode(df_train, oe_col)
df.drop("ID", axis=1 , inplace=True)
df.head()
sns.pairplot(df_train)
# Z score method
z=np.abs(zscore(df_train))
threshold=3
np.where(z>3)
df=df_train[(z<3).all(axis=1)]
df
print("Rows and Columns before using Z Score", df_train.shape)
print("Rows and Columns after using Z Score", df.shape)
# Percentage of Data Loss
data_loss=((df_train.shape[0]-df.shape[0])/df_train.shape[0])*100
print("Total percent of data lost after Z Score to deal with outliers is", data_loss)
df.skew()
df.hist(figsize=(15,15))
plt.show()
df
X = df.drop('Is_Churn', axis=1)
Y = df['Is_Churn']
# adding samples to make all the categorical label values same
oversample = SMOTE()
X, Y = oversample.fit_resample(X, Y)
scaler = StandardScaler()
X = pd.DataFrame(scaler.fit_transform(X), columns=X.columns)
X.head()
maxAccu=0
maxRS=0
for i in range(1, 1000):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=i)
lr=LogisticRegression()
lr.fit(X_train, Y_train)
pred = lr.predict(X_test)
acc_score = (accuracy_score(Y_test, pred))*100
if acc_score>maxAccu:
maxAccu=acc_score
maxRS=i
print("Best accuracy score is", maxAccu,"on Random State", maxRS)
# Classification Model Function
def classify(model, X, Y):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=maxRS)
# Training the model
model.fit(X_train, Y_train)
# Predicting Y_test
pred = model.predict(X_test)
# Classification Report
class_report = classification_report(Y_test, pred)
print("\nClassification Report:\n", class_report)
# Accuracy Score
acc_score = (accuracy_score(Y_test, pred))*100
print("Accuracy Score:", acc_score)
# F1 Score
f_one_score = (f1_score(Y_test, pred, average='macro'))*100
print("F1 Score:", f_one_score)
# Cross Validation Score
cv_score = (cross_val_score(model, X, Y, cv=5).mean())*100
print("Cross Validation Score:", cv_score)
# Result of accuracy minus cv scores
result = acc_score - cv_score
print("\nAccuracy Score - Cross Validation Score is", result)
# Logistic Regression
model1=LogisticRegression()
classify(model1, X, Y)
# Support Vector Classifier
model2=SVC(C=1.0, kernel='rbf', gamma='auto', random_state=42)
classify(model2, X, Y)
# Decision Tree Classifier
model3=DecisionTreeClassifier(random_state=maxRS, max_depth=15)
classify(model3, X, Y)
# Random Forest Classifier
model4=RandomForestClassifier(max_depth=15, random_state=maxRS)
classify(model4, X, Y)
# K Neighbors Classifier
model5=KNeighborsClassifier(n_neighbors=15)
classify(model5, X, Y)
# Extra Trees Classifier
model6=ExtraTreesClassifier()
classify(model6, X, Y)
# XGB Classifier
model7=xgb.XGBClassifier(verbosity=0)
classify(model7, X, Y)
# LGBM Classifier
model8=lgb.LGBMClassifier()
classify(model8, X, Y)
# Choosing SVC Classifier
fmod_param = {'C' : [1.0, 2.0, 3.0, 4.0, 5.0],
'kernel' : ['poly', 'rbf', 'sigmoid'],
'random_state' : [21, 42, 111, 143, 808],
'gamma' : ['scale', 'auto'],
'decision_function_shape' : ['ovo', 'ovr']
}
GSCV = GridSearchCV(SVC(), fmod_param, cv=5)
GSCV.fit(X_train,Y_train)
GSCV.best_params_
Final_Model = SVC(C=5.0, decision_function_shape='ovo', gamma='scale', kernel='rbf', random_state=21)
Classifier = Final_Model.fit(X_train, Y_train)
fmod_pred = Final_Model.predict(X_test)
fmod_acc = (f1_score(Y_test, fmod_pred, average='macro'))*100
print("F1 score for the Best Model is:", fmod_acc)
df = df_test.drop("ID", axis=1)
df
# Label Encoder
le = LabelEncoder()
df["Gender"] = le.fit_transform(df["Gender"])
# Ordinal Encoder
oe = OrdinalEncoder()
oe_col = ["Income", "Product_Holdings", "Credit_Category"]
def ordinal_encode(df, column):
df[column] = oe.fit_transform(df[column])
return df
X=ordinal_encode(df, oe_col)
# Feature Scaling
scaler = StandardScaler()
X = pd.DataFrame(scaler.fit_transform(X), columns=X.columns)
X.head()
Predicted_Churn = Final_Model.predict(X)
# Checking the predicted churn details and storing in dataframe format
predicted_output = pd.DataFrame()
predicted_output['ID'] = df_test["ID"]
predicted_output['Is_Churn'] = Predicted_Churn
predicted_output
predicted_output.to_csv("sample_submission_solutionfile.csv", index=False)
```
|
github_jupyter
|
import warnings
warnings.simplefilter("ignore")
warnings.filterwarnings("ignore")
import joblib
import missingno
import pandas_profiling
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import scipy.stats as stats
from scipy.stats import zscore
from imblearn.over_sampling import SMOTE
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import xgboost as xgb
import lightgbm as lgb
from sklearn import metrics
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
df_train = pd.read_csv("train.csv")
df_test = pd.read_csv("test.csv")
df_train.isna().sum() # checking for missing values in training dataset
df_test.isna().sum() # checking for missing values in testing dataset
df_train.nunique().sort_values().to_frame("Unique Values")
df_test.nunique().sort_values().to_frame("Unique Values")
df_train
# Label Encoder
le = LabelEncoder()
df_train["Gender"] = le.fit_transform(df_train["Gender"])
df_train.head()
# Ordinal Encoder
oe = OrdinalEncoder()
oe_col = ["Income", "Product_Holdings", "Credit_Category"]
def ordinal_encode(df, column):
df[column] = oe.fit_transform(df[column])
return df
df=ordinal_encode(df_train, oe_col)
df.drop("ID", axis=1 , inplace=True)
df.head()
sns.pairplot(df_train)
# Z score method
z=np.abs(zscore(df_train))
threshold=3
np.where(z>3)
df=df_train[(z<3).all(axis=1)]
df
print("Rows and Columns before using Z Score", df_train.shape)
print("Rows and Columns after using Z Score", df.shape)
# Percentage of Data Loss
data_loss=((df_train.shape[0]-df.shape[0])/df_train.shape[0])*100
print("Total percent of data lost after Z Score to deal with outliers is", data_loss)
df.skew()
df.hist(figsize=(15,15))
plt.show()
df
X = df.drop('Is_Churn', axis=1)
Y = df['Is_Churn']
# adding samples to make all the categorical label values same
oversample = SMOTE()
X, Y = oversample.fit_resample(X, Y)
scaler = StandardScaler()
X = pd.DataFrame(scaler.fit_transform(X), columns=X.columns)
X.head()
maxAccu=0
maxRS=0
for i in range(1, 1000):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=i)
lr=LogisticRegression()
lr.fit(X_train, Y_train)
pred = lr.predict(X_test)
acc_score = (accuracy_score(Y_test, pred))*100
if acc_score>maxAccu:
maxAccu=acc_score
maxRS=i
print("Best accuracy score is", maxAccu,"on Random State", maxRS)
# Classification Model Function
def classify(model, X, Y):
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=maxRS)
# Training the model
model.fit(X_train, Y_train)
# Predicting Y_test
pred = model.predict(X_test)
# Classification Report
class_report = classification_report(Y_test, pred)
print("\nClassification Report:\n", class_report)
# Accuracy Score
acc_score = (accuracy_score(Y_test, pred))*100
print("Accuracy Score:", acc_score)
# F1 Score
f_one_score = (f1_score(Y_test, pred, average='macro'))*100
print("F1 Score:", f_one_score)
# Cross Validation Score
cv_score = (cross_val_score(model, X, Y, cv=5).mean())*100
print("Cross Validation Score:", cv_score)
# Result of accuracy minus cv scores
result = acc_score - cv_score
print("\nAccuracy Score - Cross Validation Score is", result)
# Logistic Regression
model1=LogisticRegression()
classify(model1, X, Y)
# Support Vector Classifier
model2=SVC(C=1.0, kernel='rbf', gamma='auto', random_state=42)
classify(model2, X, Y)
# Decision Tree Classifier
model3=DecisionTreeClassifier(random_state=maxRS, max_depth=15)
classify(model3, X, Y)
# Random Forest Classifier
model4=RandomForestClassifier(max_depth=15, random_state=maxRS)
classify(model4, X, Y)
# K Neighbors Classifier
model5=KNeighborsClassifier(n_neighbors=15)
classify(model5, X, Y)
# Extra Trees Classifier
model6=ExtraTreesClassifier()
classify(model6, X, Y)
# XGB Classifier
model7=xgb.XGBClassifier(verbosity=0)
classify(model7, X, Y)
# LGBM Classifier
model8=lgb.LGBMClassifier()
classify(model8, X, Y)
# Choosing SVC Classifier
fmod_param = {'C' : [1.0, 2.0, 3.0, 4.0, 5.0],
'kernel' : ['poly', 'rbf', 'sigmoid'],
'random_state' : [21, 42, 111, 143, 808],
'gamma' : ['scale', 'auto'],
'decision_function_shape' : ['ovo', 'ovr']
}
GSCV = GridSearchCV(SVC(), fmod_param, cv=5)
GSCV.fit(X_train,Y_train)
GSCV.best_params_
Final_Model = SVC(C=5.0, decision_function_shape='ovo', gamma='scale', kernel='rbf', random_state=21)
Classifier = Final_Model.fit(X_train, Y_train)
fmod_pred = Final_Model.predict(X_test)
fmod_acc = (f1_score(Y_test, fmod_pred, average='macro'))*100
print("F1 score for the Best Model is:", fmod_acc)
df = df_test.drop("ID", axis=1)
df
# Label Encoder
le = LabelEncoder()
df["Gender"] = le.fit_transform(df["Gender"])
# Ordinal Encoder
oe = OrdinalEncoder()
oe_col = ["Income", "Product_Holdings", "Credit_Category"]
def ordinal_encode(df, column):
df[column] = oe.fit_transform(df[column])
return df
X=ordinal_encode(df, oe_col)
# Feature Scaling
scaler = StandardScaler()
X = pd.DataFrame(scaler.fit_transform(X), columns=X.columns)
X.head()
Predicted_Churn = Final_Model.predict(X)
# Checking the predicted churn details and storing in dataframe format
predicted_output = pd.DataFrame()
predicted_output['ID'] = df_test["ID"]
predicted_output['Is_Churn'] = Predicted_Churn
predicted_output
predicted_output.to_csv("sample_submission_solutionfile.csv", index=False)
| 0.663669 | 0.414069 |
```
# Run this to ensure TensorFlow 2.x is used
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import json
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
import matplotlib.pyplot as plt
import io
vocab_size = 10000 #maximum number of tokens
embedding_dim = 16 #used in the model
max_length = 100
trunc_type='post'
padding_type='post'
oov_tok = "????"
training_size = 20000
!wget --no-check-certificate \
https://storage.googleapis.com/laurencemoroney-blog.appspot.com/sarcasm.json -O sarcasm.json
with open("sarcasm.json", 'r') as f:
datastore = json.load(f)
sentences = []
labels = []
for item in datastore:
sentences.append(item['headline'])
labels.append(item['is_sarcastic'])
training_sentences = sentences[0:training_size]
testing_sentences = sentences[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
tokenizer = Tokenizer(num_words = vocab_size, oov_token = oov_tok)
tokenizer.fit_on_texts(training_sentences) # This assigns numbers to each word
word_index = tokenizer.word_index # This stores the assigned number-word pair in a dictionary
# Converting Training Data to sequences and then padding them
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_sequences_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
# Converting Testing Data to sequences and then padding them
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_sequences_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
# Information about training data
print("\nPadded Sequences:")
print(training_sequences_padded)
print("\nShape:", training_sequences_padded.shape)
print("\nThe Maximum length is: ", max(len(i) for i in training_sequences_padded))
# Information about testing data
print("\nPadded Sequences:")
print(testing_sequences_padded)
print("\nShape:", testing_sequences_padded.shape)
print("\nThe Maximum length is: ", max(len(i) for i in testing_sequences_padded))
training_sequences_padded = np.array(training_sequences_padded)
training_labels = np.array(training_labels)
testing_sequences_padded = np.array(testing_sequences_padded)
testing_labels = np.array(testing_labels)
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(24, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
print(model.summary())
num_epochs = 30
history = model.fit(training_sequences_padded, training_labels, epochs=num_epochs, validation_data=(testing_sequences_padded, testing_labels), verbose=2)
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_sentence(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
print(decode_sentence(training_sequences_padded[0]))
print(training_sentences[2])
print(labels[2])
e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape) # shape: (vocab_size, embedding_dim)
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word_num in range(1, vocab_size):
word = reverse_word_index[word_num]
embeddings = weights[word_num]
out_m.write(word + "\n")
out_v.write('\t'.join([str(x) for x in embeddings]) + "\n")
out_v.close()
out_m.close()
sentence = ["fuck you bitch asshole", "game of thrones season finale showing this sunday night"]
sequences = tokenizer.texts_to_sequences(sentence)
padded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
print(model.predict(padded))
```
|
github_jupyter
|
# Run this to ensure TensorFlow 2.x is used
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import json
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
import matplotlib.pyplot as plt
import io
vocab_size = 10000 #maximum number of tokens
embedding_dim = 16 #used in the model
max_length = 100
trunc_type='post'
padding_type='post'
oov_tok = "????"
training_size = 20000
!wget --no-check-certificate \
https://storage.googleapis.com/laurencemoroney-blog.appspot.com/sarcasm.json -O sarcasm.json
with open("sarcasm.json", 'r') as f:
datastore = json.load(f)
sentences = []
labels = []
for item in datastore:
sentences.append(item['headline'])
labels.append(item['is_sarcastic'])
training_sentences = sentences[0:training_size]
testing_sentences = sentences[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
tokenizer = Tokenizer(num_words = vocab_size, oov_token = oov_tok)
tokenizer.fit_on_texts(training_sentences) # This assigns numbers to each word
word_index = tokenizer.word_index # This stores the assigned number-word pair in a dictionary
# Converting Training Data to sequences and then padding them
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_sequences_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
# Converting Testing Data to sequences and then padding them
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_sequences_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
# Information about training data
print("\nPadded Sequences:")
print(training_sequences_padded)
print("\nShape:", training_sequences_padded.shape)
print("\nThe Maximum length is: ", max(len(i) for i in training_sequences_padded))
# Information about testing data
print("\nPadded Sequences:")
print(testing_sequences_padded)
print("\nShape:", testing_sequences_padded.shape)
print("\nThe Maximum length is: ", max(len(i) for i in testing_sequences_padded))
training_sequences_padded = np.array(training_sequences_padded)
training_labels = np.array(training_labels)
testing_sequences_padded = np.array(testing_sequences_padded)
testing_labels = np.array(testing_labels)
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(24, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
print(model.summary())
num_epochs = 30
history = model.fit(training_sequences_padded, training_labels, epochs=num_epochs, validation_data=(testing_sequences_padded, testing_labels), verbose=2)
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_sentence(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
print(decode_sentence(training_sequences_padded[0]))
print(training_sentences[2])
print(labels[2])
e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape) # shape: (vocab_size, embedding_dim)
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word_num in range(1, vocab_size):
word = reverse_word_index[word_num]
embeddings = weights[word_num]
out_m.write(word + "\n")
out_v.write('\t'.join([str(x) for x in embeddings]) + "\n")
out_v.close()
out_m.close()
sentence = ["fuck you bitch asshole", "game of thrones season finale showing this sunday night"]
sequences = tokenizer.texts_to_sequences(sentence)
padded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
print(model.predict(padded))
| 0.669745 | 0.766075 |
## Dependencies
```
from openvaccine_scripts import *
import warnings, json
from sklearn.model_selection import KFold, StratifiedKFold
import tensorflow.keras.layers as L
import tensorflow.keras.backend as K
from tensorflow.keras import optimizers, losses, Model
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
SEED = 0
seed_everything(SEED)
warnings.filterwarnings('ignore')
```
# Model parameters
```
config = {
"BATCH_SIZE": 64,
"EPOCHS": 120,
"LEARNING_RATE": 1e-3,
"ES_PATIENCE": 10,
"N_FOLDS": 5,
"N_USED_FOLDS": 5,
"PB_SEQ_LEN": 107,
"PV_SEQ_LEN": 130,
}
with open('config.json', 'w') as json_file:
json.dump(json.loads(json.dumps(config)), json_file)
config
```
# Load data
```
database_base_path = '/kaggle/input/stanford-covid-vaccine/'
train = pd.read_json(database_base_path + 'train.json', lines=True)
test = pd.read_json(database_base_path + 'test.json', lines=True)
print('Train samples: %d' % len(train))
display(train.head())
print(f'Test samples: {len(test)}')
display(test.head())
```
## Auxiliary functions
```
def get_dataset(x, y=None, labeled=True, shuffled=True, batch_size=32, buffer_size=-1, seed=0):
if labeled:
dataset = tf.data.Dataset.from_tensor_slices(({'inputs_seq': x[:, 0, :, :],
'inputs_struct': x[:, 1, :, :],
'inputs_loop': x[:, 2, :, :]},
{'outputs': y}))
else:
dataset = tf.data.Dataset.from_tensor_slices(({'inputs_seq': x[:, 0, :, :],
'inputs_struct': x[:, 1, :, :],
'inputs_loop': x[:, 2, :, :]}))
if shuffled:
dataset = dataset.shuffle(2048, seed=seed)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(buffer_size)
return dataset
def get_dataset_sampling(x, y=None, shuffled=True, seed=0):
dataset = tf.data.Dataset.from_tensor_slices(({'inputs_seq': x[:, 0, :, :],
'inputs_struct': x[:, 1, :, :],
'inputs_loop': x[:, 2, :, :]},
{'outputs': y}))
if shuffled:
dataset = dataset.shuffle(2048, seed=seed)
return dataset
```
# Model
```
def model_fn(hidden_dim=384, dropout=.5, pred_len=68, n_outputs=5):
inputs_seq = L.Input(shape=(None, 1), name='inputs_seq')
inputs_struct = L.Input(shape=(None, 1), name='inputs_struct')
inputs_loop = L.Input(shape=(None, 1), name='inputs_loop')
def _one_hot(x, num_classes):
return K.squeeze(K.one_hot(K.cast(x, 'uint8'), num_classes=num_classes), axis=2)
ohe_seq = L.Lambda(_one_hot, arguments={'num_classes': len(token2int_seq)}, input_shape=(None, 1))(inputs_seq)
ohe_struct = L.Lambda(_one_hot, arguments={'num_classes': len(token2int_struct)}, input_shape=(None, 1))(inputs_struct)
ohe_loop = L.Lambda(_one_hot, arguments={'num_classes': len(token2int_loop)}, input_shape=(None, 1))(inputs_loop)
x_concat = L.concatenate([ohe_seq, ohe_struct, ohe_loop], axis=-1, name='ohe_concatenate')
x = L.Conv1D(filters=256,
kernel_size=5,
padding='same')(x_concat)
x = L.Bidirectional(L.GRU(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))(x)
x = L.Bidirectional(L.GRU(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))(x)
# Since we are only making predictions on the first part of each sequence, we have to truncate it
x_truncated = x[:, :pred_len]
outputs = L.Dense(n_outputs, activation='linear', name='outputs')(x_truncated)
model = Model(inputs=[inputs_seq, inputs_struct, inputs_loop], outputs=outputs)
opt = optimizers.Adam(learning_rate=config['LEARNING_RATE'])
model.compile(optimizer=opt, loss=MCRMSE)
return model
model = model_fn()
model.summary()
```
# Pre-process
```
feature_cols = ['sequence', 'structure', 'predicted_loop_type']
pred_cols = ['reactivity', 'deg_Mg_pH10', 'deg_pH10', 'deg_Mg_50C', 'deg_50C']
encoder_list = [token2int_seq, token2int_struct, token2int_loop]
train_features = np.array([preprocess_inputs(train, encoder_list[idx], [col]) for idx, col in enumerate(feature_cols)]).transpose((1, 0, 2, 3))
train_labels = np.array(train[pred_cols].values.tolist()).transpose((0, 2, 1))
public_test = test.query("seq_length == 107").copy()
private_test = test.query("seq_length == 130").copy()
x_test_public = np.array([preprocess_inputs(public_test, encoder_list[idx], [col]) for idx, col in enumerate(feature_cols)]).transpose((1, 0, 2, 3))
x_test_private = np.array([preprocess_inputs(private_test, encoder_list[idx], [col]) for idx, col in enumerate(feature_cols)]).transpose((1, 0, 2, 3))
# To use as stratified col
train['signal_to_noise_int'] = train['signal_to_noise'].astype(int)
```
# Training
```
AUTO = tf.data.experimental.AUTOTUNE
skf = StratifiedKFold(n_splits=config['N_USED_FOLDS'], shuffle=True, random_state=SEED)
history_list = []
oof = train[['id']].copy()
oof_preds = np.zeros(train_labels.shape)
test_public_preds = np.zeros((x_test_public.shape[0], config['PB_SEQ_LEN'], len(pred_cols)))
test_private_preds = np.zeros((x_test_private.shape[0], config['PV_SEQ_LEN'], len(pred_cols)))
for fold,(train_idx, valid_idx) in enumerate(skf.split(train_labels, train['signal_to_noise_int'])):
if fold >= config['N_USED_FOLDS']:
break
print(f'\nFOLD: {fold+1}')
### Create datasets
x_train = train_features[train_idx]
y_train = train_labels[train_idx]
x_valid = train_features[valid_idx]
y_valid = train_labels[valid_idx]
train_ds = get_dataset(x_train, y_train, labeled=True, shuffled=True, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
valid_ds = get_dataset(x_valid, y_valid, labeled=True, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
oof_ds = get_dataset(train_features[valid_idx], labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
test_public_ds = get_dataset(x_test_public, labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
test_private_ds = get_dataset(x_test_private, labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
### Model
K.clear_session()
model = model_fn()
model_path = f'model_{fold}.h5'
es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'], restore_best_weights=True, verbose=1)
rlrp = ReduceLROnPlateau(monitor='val_loss', mode='min', factor=0.1, patience=5, verbose=1)
### Train
history = model.fit(train_ds,
validation_data=valid_ds,
callbacks=[es, rlrp],
epochs=config['EPOCHS'],
batch_size=config['BATCH_SIZE'],
verbose=2).history
history_list.append(history)
# Save last model weights
model.save_weights(model_path)
### Inference
oof_preds[valid_idx] = model.predict(oof_ds)
# Short sequence (public test)
model = model_fn(pred_len= config['PB_SEQ_LEN'])
model.load_weights(model_path)
test_public_preds += model.predict(test_public_ds) * (1 / config['N_USED_FOLDS'])
# Long sequence (private test)
model = model_fn(pred_len= config['PV_SEQ_LEN'])
model.load_weights(model_path)
test_private_preds += model.predict(test_private_ds) * (1 / config['N_USED_FOLDS'])
```
## Model loss graph
```
for fold, history in enumerate(history_list):
print(f'\nFOLD: {fold+1}')
print(f"Train {np.array(history['loss']).min():.5f} Validation {np.array(history['val_loss']).min():.5f}")
plot_metrics_agg(history_list)
```
# Post-processing
```
# Assign values to OOF set
# Assign labels
for idx, col in enumerate(pred_cols):
val = train_labels[:, :, idx]
oof = oof.assign(**{col: list(val)})
# Assign preds
for idx, col in enumerate(pred_cols):
val = oof_preds[:, :, idx]
oof = oof.assign(**{f'{col}_pred': list(val)})
# Assign values to test set
preds_ls = []
for df, preds in [(public_test, test_public_preds), (private_test, test_private_preds)]:
for i, uid in enumerate(df.id):
single_pred = preds[i]
single_df = pd.DataFrame(single_pred, columns=pred_cols)
single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])]
preds_ls.append(single_df)
preds_df = pd.concat(preds_ls)
```
# Model evaluation
```
display(evaluate_model(train, train_labels, oof_preds, pred_cols))
```
# Visualize test predictions
```
submission = pd.read_csv(database_base_path + 'sample_submission.csv')
submission = submission[['id_seqpos']].merge(preds_df, on=['id_seqpos'])
```
# Test set predictions
```
display(submission.head(10))
display(submission.describe())
submission.to_csv('submission.csv', index=False)
```
|
github_jupyter
|
from openvaccine_scripts import *
import warnings, json
from sklearn.model_selection import KFold, StratifiedKFold
import tensorflow.keras.layers as L
import tensorflow.keras.backend as K
from tensorflow.keras import optimizers, losses, Model
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
SEED = 0
seed_everything(SEED)
warnings.filterwarnings('ignore')
config = {
"BATCH_SIZE": 64,
"EPOCHS": 120,
"LEARNING_RATE": 1e-3,
"ES_PATIENCE": 10,
"N_FOLDS": 5,
"N_USED_FOLDS": 5,
"PB_SEQ_LEN": 107,
"PV_SEQ_LEN": 130,
}
with open('config.json', 'w') as json_file:
json.dump(json.loads(json.dumps(config)), json_file)
config
database_base_path = '/kaggle/input/stanford-covid-vaccine/'
train = pd.read_json(database_base_path + 'train.json', lines=True)
test = pd.read_json(database_base_path + 'test.json', lines=True)
print('Train samples: %d' % len(train))
display(train.head())
print(f'Test samples: {len(test)}')
display(test.head())
def get_dataset(x, y=None, labeled=True, shuffled=True, batch_size=32, buffer_size=-1, seed=0):
if labeled:
dataset = tf.data.Dataset.from_tensor_slices(({'inputs_seq': x[:, 0, :, :],
'inputs_struct': x[:, 1, :, :],
'inputs_loop': x[:, 2, :, :]},
{'outputs': y}))
else:
dataset = tf.data.Dataset.from_tensor_slices(({'inputs_seq': x[:, 0, :, :],
'inputs_struct': x[:, 1, :, :],
'inputs_loop': x[:, 2, :, :]}))
if shuffled:
dataset = dataset.shuffle(2048, seed=seed)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(buffer_size)
return dataset
def get_dataset_sampling(x, y=None, shuffled=True, seed=0):
dataset = tf.data.Dataset.from_tensor_slices(({'inputs_seq': x[:, 0, :, :],
'inputs_struct': x[:, 1, :, :],
'inputs_loop': x[:, 2, :, :]},
{'outputs': y}))
if shuffled:
dataset = dataset.shuffle(2048, seed=seed)
return dataset
def model_fn(hidden_dim=384, dropout=.5, pred_len=68, n_outputs=5):
inputs_seq = L.Input(shape=(None, 1), name='inputs_seq')
inputs_struct = L.Input(shape=(None, 1), name='inputs_struct')
inputs_loop = L.Input(shape=(None, 1), name='inputs_loop')
def _one_hot(x, num_classes):
return K.squeeze(K.one_hot(K.cast(x, 'uint8'), num_classes=num_classes), axis=2)
ohe_seq = L.Lambda(_one_hot, arguments={'num_classes': len(token2int_seq)}, input_shape=(None, 1))(inputs_seq)
ohe_struct = L.Lambda(_one_hot, arguments={'num_classes': len(token2int_struct)}, input_shape=(None, 1))(inputs_struct)
ohe_loop = L.Lambda(_one_hot, arguments={'num_classes': len(token2int_loop)}, input_shape=(None, 1))(inputs_loop)
x_concat = L.concatenate([ohe_seq, ohe_struct, ohe_loop], axis=-1, name='ohe_concatenate')
x = L.Conv1D(filters=256,
kernel_size=5,
padding='same')(x_concat)
x = L.Bidirectional(L.GRU(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))(x)
x = L.Bidirectional(L.GRU(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))(x)
# Since we are only making predictions on the first part of each sequence, we have to truncate it
x_truncated = x[:, :pred_len]
outputs = L.Dense(n_outputs, activation='linear', name='outputs')(x_truncated)
model = Model(inputs=[inputs_seq, inputs_struct, inputs_loop], outputs=outputs)
opt = optimizers.Adam(learning_rate=config['LEARNING_RATE'])
model.compile(optimizer=opt, loss=MCRMSE)
return model
model = model_fn()
model.summary()
feature_cols = ['sequence', 'structure', 'predicted_loop_type']
pred_cols = ['reactivity', 'deg_Mg_pH10', 'deg_pH10', 'deg_Mg_50C', 'deg_50C']
encoder_list = [token2int_seq, token2int_struct, token2int_loop]
train_features = np.array([preprocess_inputs(train, encoder_list[idx], [col]) for idx, col in enumerate(feature_cols)]).transpose((1, 0, 2, 3))
train_labels = np.array(train[pred_cols].values.tolist()).transpose((0, 2, 1))
public_test = test.query("seq_length == 107").copy()
private_test = test.query("seq_length == 130").copy()
x_test_public = np.array([preprocess_inputs(public_test, encoder_list[idx], [col]) for idx, col in enumerate(feature_cols)]).transpose((1, 0, 2, 3))
x_test_private = np.array([preprocess_inputs(private_test, encoder_list[idx], [col]) for idx, col in enumerate(feature_cols)]).transpose((1, 0, 2, 3))
# To use as stratified col
train['signal_to_noise_int'] = train['signal_to_noise'].astype(int)
AUTO = tf.data.experimental.AUTOTUNE
skf = StratifiedKFold(n_splits=config['N_USED_FOLDS'], shuffle=True, random_state=SEED)
history_list = []
oof = train[['id']].copy()
oof_preds = np.zeros(train_labels.shape)
test_public_preds = np.zeros((x_test_public.shape[0], config['PB_SEQ_LEN'], len(pred_cols)))
test_private_preds = np.zeros((x_test_private.shape[0], config['PV_SEQ_LEN'], len(pred_cols)))
for fold,(train_idx, valid_idx) in enumerate(skf.split(train_labels, train['signal_to_noise_int'])):
if fold >= config['N_USED_FOLDS']:
break
print(f'\nFOLD: {fold+1}')
### Create datasets
x_train = train_features[train_idx]
y_train = train_labels[train_idx]
x_valid = train_features[valid_idx]
y_valid = train_labels[valid_idx]
train_ds = get_dataset(x_train, y_train, labeled=True, shuffled=True, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
valid_ds = get_dataset(x_valid, y_valid, labeled=True, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
oof_ds = get_dataset(train_features[valid_idx], labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
test_public_ds = get_dataset(x_test_public, labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
test_private_ds = get_dataset(x_test_private, labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED)
### Model
K.clear_session()
model = model_fn()
model_path = f'model_{fold}.h5'
es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'], restore_best_weights=True, verbose=1)
rlrp = ReduceLROnPlateau(monitor='val_loss', mode='min', factor=0.1, patience=5, verbose=1)
### Train
history = model.fit(train_ds,
validation_data=valid_ds,
callbacks=[es, rlrp],
epochs=config['EPOCHS'],
batch_size=config['BATCH_SIZE'],
verbose=2).history
history_list.append(history)
# Save last model weights
model.save_weights(model_path)
### Inference
oof_preds[valid_idx] = model.predict(oof_ds)
# Short sequence (public test)
model = model_fn(pred_len= config['PB_SEQ_LEN'])
model.load_weights(model_path)
test_public_preds += model.predict(test_public_ds) * (1 / config['N_USED_FOLDS'])
# Long sequence (private test)
model = model_fn(pred_len= config['PV_SEQ_LEN'])
model.load_weights(model_path)
test_private_preds += model.predict(test_private_ds) * (1 / config['N_USED_FOLDS'])
for fold, history in enumerate(history_list):
print(f'\nFOLD: {fold+1}')
print(f"Train {np.array(history['loss']).min():.5f} Validation {np.array(history['val_loss']).min():.5f}")
plot_metrics_agg(history_list)
# Assign values to OOF set
# Assign labels
for idx, col in enumerate(pred_cols):
val = train_labels[:, :, idx]
oof = oof.assign(**{col: list(val)})
# Assign preds
for idx, col in enumerate(pred_cols):
val = oof_preds[:, :, idx]
oof = oof.assign(**{f'{col}_pred': list(val)})
# Assign values to test set
preds_ls = []
for df, preds in [(public_test, test_public_preds), (private_test, test_private_preds)]:
for i, uid in enumerate(df.id):
single_pred = preds[i]
single_df = pd.DataFrame(single_pred, columns=pred_cols)
single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])]
preds_ls.append(single_df)
preds_df = pd.concat(preds_ls)
display(evaluate_model(train, train_labels, oof_preds, pred_cols))
submission = pd.read_csv(database_base_path + 'sample_submission.csv')
submission = submission[['id_seqpos']].merge(preds_df, on=['id_seqpos'])
display(submission.head(10))
display(submission.describe())
submission.to_csv('submission.csv', index=False)
| 0.622459 | 0.706866 |
```
import numpy
import pandas
import re
import os
season = 2016
input_file_name = 'data/rankings/{}_composite_rankings.csv'.format(season)
def pattern_match(pattern, string):
return (re.search(pattern, string) is not None)
special_columns = ['Team', 'Rank', 'Conf', 'Record', 'Mean', 'Median', 'St.Dev']
def get_fields(width, line, data_type, n=1):
data = list()
for i in range(n):
y = line[:width]
#print '"{}"'.format(y)
z = numpy.nan if y.strip() == '' else data_type(y.strip())
data.append(z)
line = line[width:]
return (data, line)
def parse_line(line):
ranker_width = 4
section_width = 2
rank_width = 5
team_width = 17
conf_width = 5
record_width = 7
team_short_width = 9
float_width = 6
float_2_width = 7
data = list()
temp_line = line
# First Block
temp_data, temp_line = get_fields(ranker_width, temp_line, int, 5)
data.extend(temp_data)
temp_data, temp_line = get_fields(section_width, temp_line, str)
temp_data, temp_line = get_fields(ranker_width, temp_line, int, 5)
data.extend(temp_data)
temp_data, temp_line = get_fields(rank_width, temp_line, int)
data.extend(temp_data)
temp_data, temp_line = get_fields(team_width, temp_line, str)
data.extend(temp_data)
temp_data, temp_line = get_fields(conf_width, temp_line, str)
data.extend(temp_data)
temp_data, temp_line = get_fields(record_width, temp_line, str)
data.extend(temp_data)
# Blocks 2 through 4
for i in range(3):
for j in range(3):
temp_data, temp_line = get_fields(section_width, temp_line, str)
temp_data, temp_line = get_fields(ranker_width, temp_line, int, 5)
data.extend(temp_data)
temp_data, temp_line = get_fields(rank_width, temp_line, int)
data.extend(temp_data)
temp_data, temp_line = get_fields(team_short_width, temp_line, str)
data.extend(temp_data)
# Block 5
for j in range(1):
temp_data, temp_line = get_fields(section_width, temp_line, str)
temp_data, temp_line = get_fields(ranker_width, temp_line, int, 5)
data.extend(temp_data)
temp_data, temp_line = get_fields(section_width, temp_line, str)
temp_data, temp_line = get_fields(ranker_width, temp_line, int, 2)
data.extend(temp_data)
temp_data, temp_line = get_fields(section_width, temp_line, str)
temp_data, temp_line = get_fields(float_width, temp_line, float, 2)
data.extend(temp_data)
temp_data, temp_line = get_fields(float_2_width, temp_line, float)
data.extend(temp_data)
# print zip(header[:len(data)], data)
# print temp_line
return data
with open(input_file_name, 'r') as input_file:
for line_number, line in enumerate(input_file):
if line_number == 0:
header = map(lambda s: s.strip().strip(','), line.split())
df_header = list()
for f in header:
if f not in df_header:
df_header.append(f)
df_dict = dict([(f, list()) for f in df_header])
continue
# skip empty lines
if line.strip() == '':
continue
# Check for a duplicate header line
duplicate_header = map(lambda s: s.strip().strip(','), line.split())
if header == duplicate_header:
continue
data = parse_line(line)
recorded = list()
for f, x in zip(header, data):
if f not in recorded:
df_dict[f].append(x)
recorded.append(f)
df = pandas.DataFrame(df_dict)
ranker_list = sorted(list(set(df.columns) - set(special_columns)))
feature_list = list(special_columns) + ranker_list
for ranker in ranker_list:
df[ranker] = df[ranker].fillna(df['Median'])
df[feature_list][:5]
output_file = 'data/rankings/{}_composite_rankings.clean.csv'.format(season)
df[feature_list].to_csv(output_file, sep='|')
```
|
github_jupyter
|
import numpy
import pandas
import re
import os
season = 2016
input_file_name = 'data/rankings/{}_composite_rankings.csv'.format(season)
def pattern_match(pattern, string):
return (re.search(pattern, string) is not None)
special_columns = ['Team', 'Rank', 'Conf', 'Record', 'Mean', 'Median', 'St.Dev']
def get_fields(width, line, data_type, n=1):
data = list()
for i in range(n):
y = line[:width]
#print '"{}"'.format(y)
z = numpy.nan if y.strip() == '' else data_type(y.strip())
data.append(z)
line = line[width:]
return (data, line)
def parse_line(line):
ranker_width = 4
section_width = 2
rank_width = 5
team_width = 17
conf_width = 5
record_width = 7
team_short_width = 9
float_width = 6
float_2_width = 7
data = list()
temp_line = line
# First Block
temp_data, temp_line = get_fields(ranker_width, temp_line, int, 5)
data.extend(temp_data)
temp_data, temp_line = get_fields(section_width, temp_line, str)
temp_data, temp_line = get_fields(ranker_width, temp_line, int, 5)
data.extend(temp_data)
temp_data, temp_line = get_fields(rank_width, temp_line, int)
data.extend(temp_data)
temp_data, temp_line = get_fields(team_width, temp_line, str)
data.extend(temp_data)
temp_data, temp_line = get_fields(conf_width, temp_line, str)
data.extend(temp_data)
temp_data, temp_line = get_fields(record_width, temp_line, str)
data.extend(temp_data)
# Blocks 2 through 4
for i in range(3):
for j in range(3):
temp_data, temp_line = get_fields(section_width, temp_line, str)
temp_data, temp_line = get_fields(ranker_width, temp_line, int, 5)
data.extend(temp_data)
temp_data, temp_line = get_fields(rank_width, temp_line, int)
data.extend(temp_data)
temp_data, temp_line = get_fields(team_short_width, temp_line, str)
data.extend(temp_data)
# Block 5
for j in range(1):
temp_data, temp_line = get_fields(section_width, temp_line, str)
temp_data, temp_line = get_fields(ranker_width, temp_line, int, 5)
data.extend(temp_data)
temp_data, temp_line = get_fields(section_width, temp_line, str)
temp_data, temp_line = get_fields(ranker_width, temp_line, int, 2)
data.extend(temp_data)
temp_data, temp_line = get_fields(section_width, temp_line, str)
temp_data, temp_line = get_fields(float_width, temp_line, float, 2)
data.extend(temp_data)
temp_data, temp_line = get_fields(float_2_width, temp_line, float)
data.extend(temp_data)
# print zip(header[:len(data)], data)
# print temp_line
return data
with open(input_file_name, 'r') as input_file:
for line_number, line in enumerate(input_file):
if line_number == 0:
header = map(lambda s: s.strip().strip(','), line.split())
df_header = list()
for f in header:
if f not in df_header:
df_header.append(f)
df_dict = dict([(f, list()) for f in df_header])
continue
# skip empty lines
if line.strip() == '':
continue
# Check for a duplicate header line
duplicate_header = map(lambda s: s.strip().strip(','), line.split())
if header == duplicate_header:
continue
data = parse_line(line)
recorded = list()
for f, x in zip(header, data):
if f not in recorded:
df_dict[f].append(x)
recorded.append(f)
df = pandas.DataFrame(df_dict)
ranker_list = sorted(list(set(df.columns) - set(special_columns)))
feature_list = list(special_columns) + ranker_list
for ranker in ranker_list:
df[ranker] = df[ranker].fillna(df['Median'])
df[feature_list][:5]
output_file = 'data/rankings/{}_composite_rankings.clean.csv'.format(season)
df[feature_list].to_csv(output_file, sep='|')
| 0.201342 | 0.185873 |
```
%%writefile lyrics.txt
Look, I was gonna go easy on you not to hurt your feelings
But I'm only going to get this one chance
(Six minutes, six minutes)
Something's wrong, I can feel it
(Six minutes, six minutes, Slim Shady, you're on)
Just a feeling I've got
Like something's about to happen
But I don't know what
If that means, what I think it means, we're in trouble
Big trouble. And if he is as bananas as you say
I'm not taking any chances
You are just what the doc ordered
I'm beginning to feel like a Rap God, Rap God
All my people from the front to the back nod, back nod
Now who thinks their arms are long enough to slap box, slap box?
They said I rap like a robot, so call me rap-bot
But for me to rap like a computer must be in my genes
I got a laptop in my back pocket
My pen'll go off when I half-cock it
Got a fat knot from that rap profit
Made a living and a killing off it
Ever since Bill Clinton was still in office
With Monica Lewinsky feeling on his nutsack
I'm an MC still as honest
But as rude and as indecent as all hell
Syllables, skill-a-holic (Kill 'em all with)
This flippity, dippity-hippity hip-hop
You don't really wanna get into a pissing match
With this rappity brat
Packing a MAC in the back of the Ac
Backpack rap, crap, yap-yap, yackety-yack
And at the exact same time
I attempt these lyrical acrobat stunts while I'm practicing that
I'll still be able to break a motherfuckin' table
Over the back of a couple of faggots and crack it in half
Only realized it was ironic
I was signed to Aftermath after the fact
How could I not blow? All I do is drop "F" bombs
Feel my wrath of attack
Rappers are having a rough time period
Here's a maxi pad
It's actually disastrously bad
For the wack while I'm masterfully constructing this masterpiece yeah
'Cause I'm beginning to feel like a Rap God, Rap God
All my people from the front to the back nod, back nod
Now who thinks their arms are long enough to slap box, slap box?
Let me show you maintaining this shit ain't that hard, that hard
Everybody want the key and the secret to rap
Immortality like I have got
Well, to be truthful the blueprint's
Simply rage and youthful exuberance
Everybody loves to root for a nuisance
Hit the Earth like an asteroid
Did nothing but shoot for the moon since (pew)
MCs get taken to school with this music
'Cause I use it as a vehicle to 'bust a rhyme'
Now I lead a new school full of students
Me? I'm a product of Rakim
Lakim Shabazz, 2Pac, N.W.A., Cube, hey, Doc, Ren
Yella, Eazy, thank you, they got Slim
Inspired enough to one day grow up
Blow up and be in a position
To meet Run-D.M.C. and induct them
Into the motherfuckin' Rock n'
Roll Hall of Fame even though I walk in the church
And burst in a ball of flames
Only Hall of Fame I'll be inducted in is the alcohol of fame
On the wall of shame
You fags think it's all a game
'Til I walk a flock of flames
Off a plank and
Tell me what in the fuck are you thinking?
Little gay-looking boy
So gay I can barely say it with a 'straight' face, looking boy
You're witnessing a mass-occur like you're watching a church gathering take place, looking boy
Oy vey, that boy's gay
That's all they say, looking boy
You get a thumbs up, pat on the back
And a "way to go" from your label every day, looking boy
Hey, looking boy, what d'you say, looking boy?
I get a "hell yeah" from Dre, looking boy
I'mma work for everything I have
Never asked nobody for shit
Get outta my face, looking boy
Basically boy you're never gonna be capable
Of keeping up with the same pace, looking boy, 'cause
I'm beginning to feel like a Rap God, Rap God
All my people from the front to the back nod, back nod
The way I'm racing around the track, call me NASCAR, NASCAR
Dale Earnhardt of the trailer park, the White Trash God
Kneel before General Zod this planet's Krypton, no Asgard, Asgard
So you'll be Thor and I'll be Odin
You're rodent, I'm omnipotent
Let off then I'm reloading
Immediately with these bombs I'm totin'
And I should not be woken
I'm the walking dead
But I'm just a talking head, a zombie floating
But I got your mom deep-throating
I'm out my Ramen Noodle
We have nothing in common, poodle
I'm a Doberman, pinch yourself
In the arm and pay homage, pupil
It's me
My honesty's brutal
But it's honestly futile if I don't utilize
What I do though for good
At least once in a while so I wanna make sure
Somewhere in this chicken scratch I scribble and doodle
Enough rhymes to
Maybe try to help get some people through tough times
But I gotta keep a few punchlines
Just in case 'cause even you unsigned
Rappers are hungry looking at me like it's lunchtime
I know there was a time where once I
Was king of the underground
But I still rap like I'm on my Pharoahe Monch grind
So I crunch rhymes
But sometimes when you combine
Appeal with the skin color of mine
You get too big and here they come trying to
Censor you like that one line I said
On "I'm Back" from The Mathers LP
One when I tried to say I'll take seven kids from Columbine
Put 'em all in a line
Add an AK-47, a revolver and a nine
See if I get away with it now
That I ain't as big as I was, but I'm
Morphin' into an immortal coming through the portal
You're stuck in a time warp from two thousand four though
And I don't know what the fuck that you rhyme for
You're pointless as Rapunzel
With fucking cornrows
You write normal? Fuck being normal
And I just bought a new ray gun from the future
Just to come and shoot ya
Like when Fabolous made Ray J mad
'Cause Fab said he looked like a fag
At Mayweather's pad singin' to a man
While he played piano
Man, oh man, that was a 24/7 special
On the cable channel
So Ray J went straight to the radio station the very next day
"Hey, Fab, I'mma kill you"
Lyrics coming at you with supersonic speed, (JJ Fad)
Uh, summa lumma dooma lumma you assuming I'm a human
What I gotta do to get it through to you? I'm superhuman
Innovative and I'm made of rubber, so that anything you say is
Ricocheting off of me and it'll glue to you
I'm devastating more than ever demonstrating
How to give a motherfuckin' audience a feeling like it's levitating
Never fading, and I know that haters are forever waiting
For the day that they can say I fell off, they'll be celebrating
'Cause I know the way to get 'em motivated
I make elevating music
You make elevator music
"Oh, he's too mainstream."
Well, that's what they do
When they get jealous, they confuse it
"It's not hip-hop, it's pop."
'Cause I found a hella way to fuse it
With rock, shock rap with Doc
Throw on "Lose Yourself" and make 'em lose it
"I don't know how to make songs like that
I don't know what words to use."
Let me know when it occurs to you
While I'm ripping any one of these verses that versus you
It's curtains, I'm inadvertently hurtin' you
How many verses I gotta murder to
Prove that if you were half as nice
Your songs you could sacrifice virgins to
Unghh, school flunky, pill junky
But look at the accolades these skills brung me
Full of myself, but still hungry
I bully myself 'cause I make me do what I put my mind to
And I'm a million leagues above you
Ill when I speak in tongues
But it's still tongue-in-cheek, fuck you
I'm drunk. So, Satan, take the fucking wheel
I'm asleep in the front seat
Bumping Heavy D and the Boyz
Still "Chunky, but Funky"
But in my head there's something
I can feel tugging and struggling
Angels fight with devils and
Here's what they want from me
They're asking me to eliminate some of the women hate
But if you take into consideration the bitter hatred I had
Then you may be a little patient and more sympathetic to the situation
And understand the discrimination
But fuck it
Life's handing you lemons
Make lemonade then
But if I can't batter the women
How the fuck am I supposed to bake them a cake then?
Don't mistake him for Satan
It's a fatal mistake if you think I need to be overseas
And take a vacation to trip a broad
And make her fall on her face and
Don't be a retard, be a king?
Think not
Why be a king when you can be a god?
def read_file(filepath):
with open(filepath) as f:
str_text = f.read()
return str_text
my_file=read_file('lyrics.txt')
my_file
# Now removing punctuation and extra spaces
import spacy
import string
nlp=spacy.load('en_core_web_sm',disable=['parser','ner','tagger'])
def remove_punc(text):
return [token.text.lower() for token in nlp(text) if token.text not in set(string.punctuation) and token.text not in '\n\n \n\n\n!"-#$%&()--.*+,-/:;<=>?@[\\]^_`{|}~\t\n']
tokenized=remove_punc(my_file)
tokenized
train_seq=26
sequences=[]
for i in range(train_seq,len(tokenized)):
sentence=tokenized[i-train_seq:i]
sequences.append(sentence)
sequences
" ".join(sequences[0])
" ".join(sequences[50])
" ".join(sequences[98])
import tensorflow.keras as keras
from keras.preprocessing.text import Tokenizer
from tensorflow.keras import Sequential
from keras.layers import Dense,LSTM,Embedding
from tensorflow.keras.utils import to_categorical
tokenizer=Tokenizer()
tokenizer.fit_on_texts(sequences)
sequences=tokenizer.texts_to_sequences(sequences)
vocab_size=len(tokenizer.word_counts)
vocab_size
import numpy as np
seq=np.array(sequences)
seq.shape
def create_model(vocab_size,seq_len):
model=Sequential()
model.add(Embedding(vocab_size, 25, input_length=seq_len))
model.add(LSTM(150,return_sequences=True))
model.add(LSTM(150))
model.add(Dense(150,activation='relu'))
model.add(Dense(vocab_size,activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
model.summary()
return model
X=seq[:,:-1]
Y=seq[:,-1]
y=to_categorical(Y,num_classes=vocab_size+1)
y.shape
seq_length=X.shape[1]
seq_length
model=create_model(vocab_size+1,seq_length)
model.fit(X,y,epochs=300,batch_size=64)
from keras.preprocessing.sequence import pad_sequences
!pip install tensorflow==2.5.2
def predict(model,tokenizer,seq_len,seed_text,num_gen_word):
input_text=seed_text
output_text=[]
for i in range(num_gen_word):
encoded_text=tokenizer.texts_to_sequences([input_text])[0]
pad_encoded=pad_sequences([encoded_text],maxlen=seq_len,truncating='pre')
pred_word_ind= model.predict(pad_encoded)[0]
classes_x=np.argmax(pred_word_ind,axis=-1)
pred_word=tokenizer.index_word[classes_x]
input_text+=" "+pred_word
output_text.append(pred_word)
return " ".join(output_text)
predict(model,tokenizer,seq_length," ".join(sequences[10]),300)
" ".join(sequences[0])
encoded_text=tokenizer.texts_to_sequences(" ".join(sequences[0]))[0]
pad_encoded=pad_sequences([encoded_text],maxlen=seq_length,truncating='pre')
pred_word_ind= model.predict(pad_encoded)[0]
pred_word_ind
classes_x=np.argmax(pred_word_ind,axis=-1)
classes_x
tokenizer.index_word[classes_x]
```
|
github_jupyter
|
%%writefile lyrics.txt
Look, I was gonna go easy on you not to hurt your feelings
But I'm only going to get this one chance
(Six minutes, six minutes)
Something's wrong, I can feel it
(Six minutes, six minutes, Slim Shady, you're on)
Just a feeling I've got
Like something's about to happen
But I don't know what
If that means, what I think it means, we're in trouble
Big trouble. And if he is as bananas as you say
I'm not taking any chances
You are just what the doc ordered
I'm beginning to feel like a Rap God, Rap God
All my people from the front to the back nod, back nod
Now who thinks their arms are long enough to slap box, slap box?
They said I rap like a robot, so call me rap-bot
But for me to rap like a computer must be in my genes
I got a laptop in my back pocket
My pen'll go off when I half-cock it
Got a fat knot from that rap profit
Made a living and a killing off it
Ever since Bill Clinton was still in office
With Monica Lewinsky feeling on his nutsack
I'm an MC still as honest
But as rude and as indecent as all hell
Syllables, skill-a-holic (Kill 'em all with)
This flippity, dippity-hippity hip-hop
You don't really wanna get into a pissing match
With this rappity brat
Packing a MAC in the back of the Ac
Backpack rap, crap, yap-yap, yackety-yack
And at the exact same time
I attempt these lyrical acrobat stunts while I'm practicing that
I'll still be able to break a motherfuckin' table
Over the back of a couple of faggots and crack it in half
Only realized it was ironic
I was signed to Aftermath after the fact
How could I not blow? All I do is drop "F" bombs
Feel my wrath of attack
Rappers are having a rough time period
Here's a maxi pad
It's actually disastrously bad
For the wack while I'm masterfully constructing this masterpiece yeah
'Cause I'm beginning to feel like a Rap God, Rap God
All my people from the front to the back nod, back nod
Now who thinks their arms are long enough to slap box, slap box?
Let me show you maintaining this shit ain't that hard, that hard
Everybody want the key and the secret to rap
Immortality like I have got
Well, to be truthful the blueprint's
Simply rage and youthful exuberance
Everybody loves to root for a nuisance
Hit the Earth like an asteroid
Did nothing but shoot for the moon since (pew)
MCs get taken to school with this music
'Cause I use it as a vehicle to 'bust a rhyme'
Now I lead a new school full of students
Me? I'm a product of Rakim
Lakim Shabazz, 2Pac, N.W.A., Cube, hey, Doc, Ren
Yella, Eazy, thank you, they got Slim
Inspired enough to one day grow up
Blow up and be in a position
To meet Run-D.M.C. and induct them
Into the motherfuckin' Rock n'
Roll Hall of Fame even though I walk in the church
And burst in a ball of flames
Only Hall of Fame I'll be inducted in is the alcohol of fame
On the wall of shame
You fags think it's all a game
'Til I walk a flock of flames
Off a plank and
Tell me what in the fuck are you thinking?
Little gay-looking boy
So gay I can barely say it with a 'straight' face, looking boy
You're witnessing a mass-occur like you're watching a church gathering take place, looking boy
Oy vey, that boy's gay
That's all they say, looking boy
You get a thumbs up, pat on the back
And a "way to go" from your label every day, looking boy
Hey, looking boy, what d'you say, looking boy?
I get a "hell yeah" from Dre, looking boy
I'mma work for everything I have
Never asked nobody for shit
Get outta my face, looking boy
Basically boy you're never gonna be capable
Of keeping up with the same pace, looking boy, 'cause
I'm beginning to feel like a Rap God, Rap God
All my people from the front to the back nod, back nod
The way I'm racing around the track, call me NASCAR, NASCAR
Dale Earnhardt of the trailer park, the White Trash God
Kneel before General Zod this planet's Krypton, no Asgard, Asgard
So you'll be Thor and I'll be Odin
You're rodent, I'm omnipotent
Let off then I'm reloading
Immediately with these bombs I'm totin'
And I should not be woken
I'm the walking dead
But I'm just a talking head, a zombie floating
But I got your mom deep-throating
I'm out my Ramen Noodle
We have nothing in common, poodle
I'm a Doberman, pinch yourself
In the arm and pay homage, pupil
It's me
My honesty's brutal
But it's honestly futile if I don't utilize
What I do though for good
At least once in a while so I wanna make sure
Somewhere in this chicken scratch I scribble and doodle
Enough rhymes to
Maybe try to help get some people through tough times
But I gotta keep a few punchlines
Just in case 'cause even you unsigned
Rappers are hungry looking at me like it's lunchtime
I know there was a time where once I
Was king of the underground
But I still rap like I'm on my Pharoahe Monch grind
So I crunch rhymes
But sometimes when you combine
Appeal with the skin color of mine
You get too big and here they come trying to
Censor you like that one line I said
On "I'm Back" from The Mathers LP
One when I tried to say I'll take seven kids from Columbine
Put 'em all in a line
Add an AK-47, a revolver and a nine
See if I get away with it now
That I ain't as big as I was, but I'm
Morphin' into an immortal coming through the portal
You're stuck in a time warp from two thousand four though
And I don't know what the fuck that you rhyme for
You're pointless as Rapunzel
With fucking cornrows
You write normal? Fuck being normal
And I just bought a new ray gun from the future
Just to come and shoot ya
Like when Fabolous made Ray J mad
'Cause Fab said he looked like a fag
At Mayweather's pad singin' to a man
While he played piano
Man, oh man, that was a 24/7 special
On the cable channel
So Ray J went straight to the radio station the very next day
"Hey, Fab, I'mma kill you"
Lyrics coming at you with supersonic speed, (JJ Fad)
Uh, summa lumma dooma lumma you assuming I'm a human
What I gotta do to get it through to you? I'm superhuman
Innovative and I'm made of rubber, so that anything you say is
Ricocheting off of me and it'll glue to you
I'm devastating more than ever demonstrating
How to give a motherfuckin' audience a feeling like it's levitating
Never fading, and I know that haters are forever waiting
For the day that they can say I fell off, they'll be celebrating
'Cause I know the way to get 'em motivated
I make elevating music
You make elevator music
"Oh, he's too mainstream."
Well, that's what they do
When they get jealous, they confuse it
"It's not hip-hop, it's pop."
'Cause I found a hella way to fuse it
With rock, shock rap with Doc
Throw on "Lose Yourself" and make 'em lose it
"I don't know how to make songs like that
I don't know what words to use."
Let me know when it occurs to you
While I'm ripping any one of these verses that versus you
It's curtains, I'm inadvertently hurtin' you
How many verses I gotta murder to
Prove that if you were half as nice
Your songs you could sacrifice virgins to
Unghh, school flunky, pill junky
But look at the accolades these skills brung me
Full of myself, but still hungry
I bully myself 'cause I make me do what I put my mind to
And I'm a million leagues above you
Ill when I speak in tongues
But it's still tongue-in-cheek, fuck you
I'm drunk. So, Satan, take the fucking wheel
I'm asleep in the front seat
Bumping Heavy D and the Boyz
Still "Chunky, but Funky"
But in my head there's something
I can feel tugging and struggling
Angels fight with devils and
Here's what they want from me
They're asking me to eliminate some of the women hate
But if you take into consideration the bitter hatred I had
Then you may be a little patient and more sympathetic to the situation
And understand the discrimination
But fuck it
Life's handing you lemons
Make lemonade then
But if I can't batter the women
How the fuck am I supposed to bake them a cake then?
Don't mistake him for Satan
It's a fatal mistake if you think I need to be overseas
And take a vacation to trip a broad
And make her fall on her face and
Don't be a retard, be a king?
Think not
Why be a king when you can be a god?
def read_file(filepath):
with open(filepath) as f:
str_text = f.read()
return str_text
my_file=read_file('lyrics.txt')
my_file
# Now removing punctuation and extra spaces
import spacy
import string
nlp=spacy.load('en_core_web_sm',disable=['parser','ner','tagger'])
def remove_punc(text):
return [token.text.lower() for token in nlp(text) if token.text not in set(string.punctuation) and token.text not in '\n\n \n\n\n!"-#$%&()--.*+,-/:;<=>?@[\\]^_`{|}~\t\n']
tokenized=remove_punc(my_file)
tokenized
train_seq=26
sequences=[]
for i in range(train_seq,len(tokenized)):
sentence=tokenized[i-train_seq:i]
sequences.append(sentence)
sequences
" ".join(sequences[0])
" ".join(sequences[50])
" ".join(sequences[98])
import tensorflow.keras as keras
from keras.preprocessing.text import Tokenizer
from tensorflow.keras import Sequential
from keras.layers import Dense,LSTM,Embedding
from tensorflow.keras.utils import to_categorical
tokenizer=Tokenizer()
tokenizer.fit_on_texts(sequences)
sequences=tokenizer.texts_to_sequences(sequences)
vocab_size=len(tokenizer.word_counts)
vocab_size
import numpy as np
seq=np.array(sequences)
seq.shape
def create_model(vocab_size,seq_len):
model=Sequential()
model.add(Embedding(vocab_size, 25, input_length=seq_len))
model.add(LSTM(150,return_sequences=True))
model.add(LSTM(150))
model.add(Dense(150,activation='relu'))
model.add(Dense(vocab_size,activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
model.summary()
return model
X=seq[:,:-1]
Y=seq[:,-1]
y=to_categorical(Y,num_classes=vocab_size+1)
y.shape
seq_length=X.shape[1]
seq_length
model=create_model(vocab_size+1,seq_length)
model.fit(X,y,epochs=300,batch_size=64)
from keras.preprocessing.sequence import pad_sequences
!pip install tensorflow==2.5.2
def predict(model,tokenizer,seq_len,seed_text,num_gen_word):
input_text=seed_text
output_text=[]
for i in range(num_gen_word):
encoded_text=tokenizer.texts_to_sequences([input_text])[0]
pad_encoded=pad_sequences([encoded_text],maxlen=seq_len,truncating='pre')
pred_word_ind= model.predict(pad_encoded)[0]
classes_x=np.argmax(pred_word_ind,axis=-1)
pred_word=tokenizer.index_word[classes_x]
input_text+=" "+pred_word
output_text.append(pred_word)
return " ".join(output_text)
predict(model,tokenizer,seq_length," ".join(sequences[10]),300)
" ".join(sequences[0])
encoded_text=tokenizer.texts_to_sequences(" ".join(sequences[0]))[0]
pad_encoded=pad_sequences([encoded_text],maxlen=seq_length,truncating='pre')
pred_word_ind= model.predict(pad_encoded)[0]
pred_word_ind
classes_x=np.argmax(pred_word_ind,axis=-1)
classes_x
tokenizer.index_word[classes_x]
| 0.034205 | 0.342654 |
<h1>Simulator</h1>
<p>This notebook demonstrates the functionality of the simulator. I will provide a series of code examples, along with visualizations, to show what it enables.</p>
<h2>Import packages</h2>
```
import os
from PIL import Image
import numpy as np
import trimesh
import warnings
# warnings.filterwarnings("ignore")
import meshplot as mp
from pterotactyl.simulator.scene import sampler
from pterotactyl.simulator.physics import grasping
from pterotactyl.utility import utils
import pterotactyl.objects as objects
```
<h2>Select object</h2>
```
OBJ_LOCATION = os.path.join(os.path.dirname(objects.__file__), "test_objects/0")
batch = [OBJ_LOCATION]
```
<h2>Visualize object to be touched (OPTIONAL)</h2>
```
verts, faces = utils.load_mesh_touch(OBJ_LOCATION + '.obj')
plot = mp.plot(verts.data.cpu().numpy(), faces.data.cpu().numpy())
```
<h2> Start the simulator and load the batch </h2>
Here we set up the grasping environment, specifying a batch size of 1, that we want vision signals returned, and that the rendered images should be 256 by 256. We then load the object into the simulator and set the object scale to 1/2.6.
```
s = sampler.Sampler(grasping.Agnostic_Grasp, bs=1, vision=True, resolution = [256, 256])
s.load_objects(batch, from_dataset=False, scale = 2.6)
```
<h2> Perform an action </h2>
```
action = [30]
parameters = [[[.3, .3, .3], [60, 0, 135]]]
signals = s.sample(action, touch=True, touch_point_cloud=True, vision=True, vision_occluded=True,parameters=parameters )
```
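Before visualizing, the short cell below (an addition for this write-up, not part of the original notebook) simply inspects what came back, assuming `signals` behaves like a plain dictionary, as the string indexing in the following cells suggests. The keys listed in the comment are exactly the ones consumed later, each indexed first by batch element.
```
# Keys used later in this notebook: "vision", "vision_occluded", "touch_status",
# "touch_signal", "depths", and "touch_point_cloud" (the last is only present
# because we requested touch_point_cloud=True above).
print(sorted(signals.keys()))
print(len(signals["touch_status"][0]))  # one status per finger; the plots below iterate over four
```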
<h2> Observe results </h2>
```
img_vision = Image.fromarray(signals["vision"][0])
display(img_vision)
img_vision_grasp = Image.fromarray(signals["vision_occluded"][0])
display(img_vision_grasp)
image = np.zeros((121*4, 121*2, 3)).astype(np.uint8)
for i in range(4):
print(f'Finger {i} has status {signals["touch_status"][0][i]}')
touch = signals["touch_signal"][0][i].data.numpy().astype(np.uint8)
image[i*121:i*121+121, :121] = touch
depth = utils.visualize_depth(signals["depths"][0][i].data.numpy()).reshape(121, 121, 1)
image[i*121:i*121+121, 121:] = depth
print(' ')
print(' TOUCH DEPTH')
display(Image.fromarray(image))
```
<h2> Visualize the touches </h2>
```
points = []
plot = mp.plot(verts.data.cpu().numpy(), faces.data.cpu().numpy())
for p in signals["touch_point_cloud"][0]:
if p.shape[0] >0:
points.append(p)
points = np.concatenate(points).reshape(-1,3)
plot.add_points(points, c=points.sum(axis=1), shading={ "point_size": 0.03})
```
<h2> Perform new actions </h2>
```
action = [40]
parameters = [[[0.35, -0.35, 0.3], [60, 0, 45]]]
signals = s.sample(action, touch=True, touch_point_cloud=False, vision=True, vision_occluded=True,parameters=parameters )
```
<h2> Observe new results </h2>
```
img_vision = Image.fromarray(signals["vision"][0])
display(img_vision)
img_vision_grasp = Image.fromarray(signals["vision_occluded"][0])
display(img_vision_grasp)
image = np.zeros((121*4, 121*2, 3)).astype(np.uint8)
for i in range(4):
print(f'Finger {i} has status {signals["touch_status"][0][i]}')
touch = signals["touch_signal"][0][i].data.numpy().astype(np.uint8)
image[i*121:i*121+121, :121] = touch
depth = utils.visualize_depth(signals["depths"][0][i].data.numpy()).reshape(121, 121, 1)
image[i*121:i*121+121, 121:] = depth
print(' ')
print(' TOUCH DEPTH')
display(Image.fromarray(image))
```
|
github_jupyter
|
import os
from PIL import Image
import numpy as np
import trimesh
import warnings
# warnings.filterwarnings("ignore")
import meshplot as mp
from pterotactyl.simulator.scene import sampler
from pterotactyl.simulator.physics import grasping
from pterotactyl.utility import utils
import pterotactyl.objects as objects
OBJ_LOCATION = os.path.join(os.path.dirname(objects.__file__), "test_objects/0")
batch = [OBJ_LOCATION]
verts, faces = utils.load_mesh_touch(OBJ_LOCATION + '.obj')
plot = mp.plot(verts.data.cpu().numpy(), faces.data.cpu().numpy())
s = sampler.Sampler(grasping.Agnostic_Grasp, bs=1, vision=True, resolution = [256, 256])
s.load_objects(batch, from_dataset=False, scale = 2.6)
action = [30]
parameters = [[[.3, .3, .3], [60, 0, 135]]]
signals = s.sample(action, touch=True, touch_point_cloud=True, vision=True, vision_occluded=True,parameters=parameters )
img_vision = Image.fromarray(signals["vision"][0])
display(img_vision)
img_vision_grasp = Image.fromarray(signals["vision_occluded"][0])
display(img_vision_grasp)
image = np.zeros((121*4, 121*2, 3)).astype(np.uint8)
for i in range(4):
print(f'Finger {i} has status {signals["touch_status"][0][i]}')
touch = signals["touch_signal"][0][i].data.numpy().astype(np.uint8)
image[i*121:i*121+121, :121] = touch
depth = utils.visualize_depth(signals["depths"][0][i].data.numpy()).reshape(121, 121, 1)
image[i*121:i*121+121, 121:] = depth
print(' ')
print(' TOUCH DEPTH')
display(Image.fromarray(image))
points = []
plot = mp.plot(verts.data.cpu().numpy(), faces.data.cpu().numpy())
for p in signals["touch_point_cloud"][0]:
if p.shape[0] >0:
points.append(p)
points = np.concatenate(points).reshape(-1,3)
plot.add_points(points, c=points.sum(axis=1), shading={ "point_size": 0.03})
action = [40]
parameters = [[[0.35, -0.35, 0.3], [60, 0, 45]]]
signals = s.sample(action, touch=True, touch_point_cloud=False, vision=True, vision_occluded=True,parameters=parameters )
img_vision = Image.fromarray(signals["vision"][0])
display(img_vision)
img_vision_grasp = Image.fromarray(signals["vision_occluded"][0])
display(img_vision_grasp)
image = np.zeros((121*4, 121*2, 3)).astype(np.uint8)
for i in range(4):
print(f'Finger {i} has status {signals["touch_status"][0][i]}')
touch = signals["touch_signal"][0][i].data.numpy().astype(np.uint8)
image[i*121:i*121+121, :121] = touch
depth = utils.visualize_depth(signals["depths"][0][i].data.numpy()).reshape(121, 121, 1)
image[i*121:i*121+121, 121:] = depth
print(' ')
print(' TOUCH DEPTH')
display(Image.fromarray(image))
| 0.199074 | 0.934215 |
```
import pandas as pd
import numpy as np
import pathlib
import os
%matplotlib inline
from matplotlib import pyplot as plt
from matplotlib import cm
import seaborn as sns
COLUMNS_NO_FEATURES = ['id', 'tile', 'cnt', 'ra_k', 'dec_k', 'vs_type', 'vs_catalog', 'cls']
PATH = pathlib.Path(os.path.abspath(os.path.dirname("")))
DATA_PATH = PATH / "bin"
for d in DATA_PATH.glob("*.pkl.bz2"):
tile = d.name.split(".")[0]
original = pd.read_pickle(d)
new_path = f"/home/jbcabral/carpyncho3/production_data/stored/light_curves/{tile}/features_{tile}.npy"
new = pd.DataFrame(np.load(new_path, allow_pickle=True))
new = new[new.id.isin(original.id)]
new["vs_type"] = new.vs_type.str.decode("utf-8")
new["cls"] = new.vs_type.apply(lambda t: 1 if t != "" else 0)
new["tile"] = new.id.apply(lambda i: "b" + str(i)[1:4])
# break
new.to_pickle(d, compression="bz2")
print(d)
df = pd.read_pickle("_data/s20k.pkl.bz2")
for x in df.columns:
if df[x].dtype == object:
continue
if np.isinf(df[x].values).sum():
print(x)
print (df[np.isinf(df.Period_fit.values)].shape)
print( df[np.isinf(df.Period_fit.values)].vs_type.unique())
for d in DATA_PATH.glob("*.pkl.bz2"):
df = pd.read_pickle(d)
break
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
df = pd.read_pickle("_data/sO2O.pkl.bz2")
import seaborn as sns
df =df.drop(["StetsonK"], axis=1)
X_columns = [c for c in df.columns if c not in ['id', 'tile', 'cnt', 'ra_k', 'dec_k', 'vs_type', 'vs_catalog', 'cls'] ]
plt.scatter(df.Meanvariance, df.Std)
from scipy import stats
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import squareform
# load the data here
all_data = pd.concat(data.values(), ignore_index=True)
# compute the correlation over everything
corr = all_data[X_columns].corr(method="pearson").values
corr = np.abs(np.tril(corr, -1))
# flatten everything below .9 to 0
corr[corr < .9] = 0
# compute the distances from 1 - the lower triangle (tril)
distances = linkage(1-corr, method="single", optimal_ordering=True)
# plot the dendrogram
fig, ax = plt.subplots(figsize=(15, 8))
ddata = dendrogram(distances, labels=X_columns, ax=ax);
plt.tight_layout()
# fig.savefig("plots/s_features/dendro.pdf")
import pandas as pd
pd.read_pickle("_data/full.pkl.bz2").Mean.describe()
df.Mean.describe()
df=pd.read_pickle("_data/full.pkl.bz2")
def sample(df, **k):
sample = []
for t, g in df.groupby("tile"):
rr = g[g.cls == 1]
unk = g[g.cls == 0].sample(**k)
sample.extend([rr, unk])
return pd.concat(sample)
def sample_o2o(df):
sample = []
for t, g in df.groupby("tile"):
rr = g[g.cls == 1]
unk = g[g.cls == 0].sample(len(rr))
sample.extend([rr, unk])
return pd.concat(sample)
s10p = sample(df, frac=.1)
s20k = sample(s10p, n=20000)
s5k = sample(s20k, n=5000)
s2_5k = sample(s5k, n=2500)
sO2O = sample_o2o(s2_5k)
import joblib
joblib.dump({
0.1: s10p.id.values,
20000: s20k.id.values,
5000: s5k.id.values,
2500: s2_5k.id.values,
'O2O': sO2O.id.values
}, "bin/sampleids3.pkl")
df.sample(frac=.10).shape
df.sample(frac=.10).shape
joblib.load("bin/sampleids.pkl")
import pandas as pd
df=pd.read_pickle("_data/full.pkl.bz2")
import numpy as np
import pandas as pd
from PyAstronomy.pyasl import foldAt
import feets.preprocess
lc = np.load("/home/jbcabral/carpyncho3/production_data/stored/light_curves/b278/lc_obs_b278.npy")
lc = lc[(lc["bm_src_id"] == 32780000002917) | (lc["bm_src_id"] == 32780000005228)]
lc = pd.DataFrame(lc)
def get_ts(df, id):
ts = df[df.bm_src_id == id]
time, mag, err = (
ts.pwp_stack_src_hjd.values,
ts.pwp_stack_src_mag3.values,
ts.pwp_stack_src_mag_err3.values)
sort = np.argsort(time)
time, mag, err = time[sort], mag[sort], err[sort]
print(len(time))
time, mag, err = feets.preprocess.remove_noise(time, mag, err, std_limit=3)
print(len(time))
return time, mag, err
def plot_lc(tax, pax, sid):
time, mag, err = get_ts(lc, sid)
tax.errorbar(time, mag, err, ls="", marker="o", ecolor="k")
tax.set_title(f"Source '{sid}' in time")
tax.set_xlabel("Time")
tax.set_ylabel("Magnitude")
tax.invert_yaxis()
t0 = np.min(mag)
period = df[df.id == sid].PeriodLS.values[0]
phases = foldAt(time, period, T0=t0)
sort = np.argsort(phases)
phases, pmag, perr = phases[sort], mag[sort], err[sort]
phases = np.hstack((phases, phases + 1))
pmag = np.hstack((pmag, pmag))
perr = np.hstack((perr, perr))
pax.errorbar(phases, pmag, perr, ls="", marker="o", ecolor="k", color="r")
pax.set_title(f"Source '{sid}' in phase - Period={period}, t0={t0}")
pax.set_xlabel("Phase")
pax.set_ylabel("Magnitude")
pax.invert_yaxis()
sid = 32780000002917
fig, axs = plt.subplots(1, 2, figsize=(20, 4))
plot_lc(*axs, sid=sid)
fig.tight_layout()
sid = 32780000005228
fig, axs = plt.subplots(1, 2, figsize=(20, 4))
plot_lc(*axs, sid=sid)
fig.tight_layout()
df2 = pd.read_pickle("_data/full_scaled.pkl.bz2")
df[df.id.isin([32780000002917, 32780000005228])].to_csv("to_pablo.csv", index=False)
df2[df2.id.isin([32780000002917, 32780000005228])]["id Period_fit Psi_eta PeriodLS Psi_CS Skew n09_jh_color Mean".split()]
# Output of the cell above (scaled features for the two sources):
#        Period_fit    Psi_eta     PeriodLS     Psi_CS    Skew        n09_jh_color  Mean
# 10384   0.3289593  -0.2615279  -0.08669450  1.736924  0.06582776   -1.163823      0.5229311
# 10389  -3.3952236  -2.6802177  -0.08656242  1.639955  0.05954095   -1.089007     -0.5671582
df3 = pd.read_pickle("_data/s5k.pkl.bz2").to_csv("to_pablo5k.csv.bz2", index=False, compression="bz2")
```
|
github_jupyter
|
import pandas as pd
import numpy as np
import pathlib
import os
%matplotlib inline
from matplotlib import pyplot as plt
from matplotlib import cm
import seaborn as sns
COLUMNS_NO_FEATURES = ['id', 'tile', 'cnt', 'ra_k', 'dec_k', 'vs_type', 'vs_catalog', 'cls']
PATH = pathlib.Path(os.path.abspath(os.path.dirname("")))
DATA_PATH = PATH / "bin"
for d in DATA_PATH.glob("*.pkl.bz2"):
tile = d.name.split(".")[0]
original = pd.read_pickle(d)
new_path = f"/home/jbcabral/carpyncho3/production_data/stored/light_curves/{tile}/features_{tile}.npy"
new = pd.DataFrame(np.load(new_path, allow_pickle=True))
new = new[new.id.isin(original.id)]
new["vs_type"] = new.vs_type.str.decode("utf-8")
new["cls"] = new.vs_type.apply(lambda t: 1 if t != "" else 0)
new["tile"] = new.id.apply(lambda i: "b" + str(i)[1:4])
# break
new.to_pickle(d, compression="bz2")
print(d)
df = pd.read_pickle("_data/s20k.pkl.bz2")
for x in df.columns:
if df[x].dtype == object:
continue
if np.isinf(df[x].values).sum():
print(x)
print (df[np.isinf(df.Period_fit.values)].shape)
print( df[np.isinf(df.Period_fit.values)].vs_type.unique())
for d in DATA_PATH.glob("*.pkl.bz2"):
df = pd.read_pickle(d)
break
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
df = pd.read_pickle("_data/sO2O.pkl.bz2")
import seaborn as sns
df =df.drop(["StetsonK"], axis=1)
X_columns = [c for c in df.columns if c not in ['id', 'tile', 'cnt', 'ra_k', 'dec_k', 'vs_type', 'vs_catalog', 'cls'] ]
plt.scatter(df.Meanvariance, df.Std)
from scipy import stats
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import squareform
# aca cargo los datos
all_data = pd.concat(data.values(), ignore_index=True)
# aca le calculo la correlacion a todo
corr = all_data[X_columns].corr(method="pearson").values
corr = np.abs(np.tril(corr, -1))
# todo lo que es menos .9 lo plancho a 0
corr[corr < .9] = 0
# calculo las distancias con el 1 - triangulo inf (tril)
distances = linkage(1-corr, method="single", optimal_ordering=True)
# grafico el dendograma
fig, ax = plt.subplots(figsize=(15, 8))
ddata = dendrogram(distances, labels=X_columns, ax=ax);
plt.tight_layout()
# fig.savefig("plots/s_features/dendro.pdf")
import pandas as pd
pd.read_pickle("_data/full.pkl.bz2").Mean.describe()
df.Mean.describe()
df=pd.read_pickle("_data/full.pkl.bz2")
def sample(df, **k):
sample = []
for t, g in df.groupby("tile"):
rr = g[g.cls == 1]
unk = g[g.cls == 0].sample(**k)
sample.extend([rr, unk])
return pd.concat(sample)
def sample_o2o(df):
sample = []
for t, g in df.groupby("tile"):
rr = g[g.cls == 1]
unk = g[g.cls == 0].sample(len(rr))
sample.extend([rr, unk])
return pd.concat(sample)
s10p = sample(df, frac=.1)
s20k = sample(s10p, n=20000)
s5k = sample(s20k, n=5000)
s2_5k = sample(s5k, n=2500)
sO2O = sample_o2o(s2_5k)
import joblib
joblib.dump({
0.1: s10p.id.values,
20000: s20k.id.values,
5000: s5k.id.values,
2500: s2_5k.id.values,
'O2O': sO2O.id.values
}, "bin/sampleids3.pkl")
df.saple(frac=.10).shape
df.sample(frac=.10).shape
joblib.load("bin/sampleids.pkl")
import pandas as pd
df=pd.read_pickle("_data/full.pkl.bz2")
import numpy as np
import pandas as pd
from PyAstronomy.pyasl import foldAt
import feets.preprocess
lc = np.load("/home/jbcabral/carpyncho3/production_data/stored/light_curves/b278/lc_obs_b278.npy")
lc = lc[(lc["bm_src_id"] == 32780000002917) | (lc["bm_src_id"] == 32780000005228)]
lc = pd.DataFrame(lc)
def get_ts(df, id):
ts = df[df.bm_src_id == id]
time, mag, err = (
ts.pwp_stack_src_hjd.values,
ts.pwp_stack_src_mag3.values,
ts.pwp_stack_src_mag_err3.values)
sort = np.argsort(time)
time, mag, err = time[sort], mag[sort], err[sort]
print(len(time))
time, mag, err = feets.preprocess.remove_noise(time, mag, err, std_limit=3)
print(len(time))
return time, mag, err
def plot_lc(tax, pax, sid):
time, mag, err = get_ts(lc, sid)
tax.errorbar(time, mag, err, ls="", marker="o", ecolor="k")
tax.set_title(f"Source '{sid}' in time")
tax.set_xlabel("Time")
tax.set_ylabel("Magnitude")
tax.invert_yaxis()
t0 = np.min(mag)
period = df[df.id == sid].PeriodLS.values[0]
phases = foldAt(time, period, T0=t0)
sort = np.argsort(phases)
phases, pmag, perr = phases[sort], mag[sort], err[sort]
phases = np.hstack((phases, phases + 1))
pmag = np.hstack((pmag, pmag))
perr = np.hstack((perr, perr))
pax.errorbar(phases, pmag, perr, ls="", marker="o", ecolor="k", color="r")
pax.set_title(f"Source '{sid}' in phase - Period={period}, t0={t0}")
pax.set_xlabel("Phase")
pax.set_ylabel("Magnitude")
pax.invert_yaxis()
sid = 32780000002917
fig, axs = plt.subplots(1, 2, figsize=(20, 4))
plot_lc(*axs, sid=sid)
fig.tight_layout()
sid = 32780000005228
fig, axs = plt.subplots(1, 2, figsize=(20, 4))
plot_lc(*axs, sid=sid)
fig.tight_layout()
df2 = pd.read_pickle("_data/full_scaled.pkl.bz2")
df[df.id.isin([32780000002917, 32780000005228])].to_csv("to_pablo.csv", index=False)
df2[df2.id.isin([32780000002917, 32780000005228])]["id Period_fit Psi_eta PeriodLS Psi_CS Skew n09_jh_color Mean".split()]
Period_fit Psi_eta PeriodLS Psi_CS Skew n09_jh_color Mean
10384 0.3289593 -0.2615279 -0.08669450 1.736924 0.06582776 -1.163823 0.5229311
10389 -3.3952236 -2.6802177 -0.08656242 1.639955 0.05954095 -1.089007 -0.5671582
df3 = pd.read_pickle("_data/s5k.pkl.bz2").to_csv("to_pablo5k.csv.bz2", index=False, compression="bz2")
| 0.28398 | 0.344003 |
```
from __future__ import division, absolute_import, print_function
%load_ext autoreload
%autoreload 2
import numpy as np
import tensorflow as tf
tf.__version__
```
# Detecting lines of 5 with a ConvNet and hand-woven features
```
hor=np.zeros([5,5], dtype=float)
hor[2]=1
diag=np.eye(5, dtype=float)
filters = np.array([hor, hor.T, diag, diag[::-1]])
kernel_init = tf.constant_initializer(np.rollaxis(filters, 0, 3))
bias_init = tf.constant_initializer(-4.)
## Take particular note of the shape: Channels last
np.shape(kernel_init.value)
```
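Why the bias of $-4$: each filter sums the five board cells along its line, so with a ReLU the unit only fires when all five cells are occupied ($5 \cdot 1 - 4 = 1$); four stones or fewer leave a non-positive pre-activation that is clipped to zero. A quick NumPy check of that threshold logic (an illustrative addition, not part of the original notebook):
```
hor_check = np.zeros([5, 5]); hor_check[2] = 1               # the horizontal filter from above
full_line = np.zeros([5, 5]); full_line[2] = 1               # board window with 5 stones in a row
broken_line = full_line.copy(); broken_line[2, 0] = 0        # only 4 stones
for window in (full_line, broken_line):
    activation = max(0.0, (window * hor_check).sum() - 4.0)  # convolution at the centre + bias, then ReLU
    print(activation)                                        # 1.0 for the full line, 0.0 otherwise
```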
### Verifying the function with some examples
```
boards = np.zeros([6, 10, 10])
for i in range(5):
boards[0][5][3+i] = 1.
boards[1][3+i][5] = 1.
boards[2][8-i][3+i] = 1.
boards[3][2+i][2+i] = 1.
boards[4][2+i][2+i] = 1.
boards[5][2+i][2+i] = 1.
boards[0]
inp=tf.constant(boards.reshape(-1,10, 10, 1))
out = tf.layers.conv2d(kernel_size=5, kernel_initializer=kernel_init,
filters=4, inputs=inp, padding='same',
bias_initializer=bias_init, activation='relu')
out = tf.layers.max_pooling2d(inputs=out, pool_size=10, strides=1)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
res = sess.run(out)
res = np.squeeze(np.rollaxis(res, -1, 0))
print(res)
```
You see that every sample (the six columns) has a $1$ at the position that corresponds to the particular pattern that has been recognized.
```
sum(res)
```
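`sum(res)` adds up the four filter responses for each sample, giving a per-board detection count; taking the sign of that count yields the same 0/1 flag that the *labels* graph in the next section computes in TensorFlow. A one-line NumPy equivalent (illustrative, not part of the original notebook):
```
flags = np.sign(res.sum(axis=0))  # res has shape (4 filters, 6 samples) after the squeeze above
print(flags)                      # 1. for every sample, since each board contains a line of 5
```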
### Creating labels with the hand-crafted features
The *labels* graph maps each sample that contains a line of 5 to a $1$, and all others to a $0$.
```
inp_heuristics = tf.placeholder(name="inp_heuristics", shape=[None, 10, 10, 1], dtype=tf.float32)
out = tf.layers.conv2d(kernel_size=5, kernel_initializer=kernel_init,
filters=4, inputs=inp_heuristics, padding='same',
bias_initializer=bias_init, activation='relu')
out = tf.layers.max_pooling2d(inputs=out, pool_size=10, strides=1)
labels = tf.squeeze(tf.sign(tf.reduce_sum(out, axis=3)))
samples = (np.random.uniform(size = [5, 10,10]) < .3).astype(float).reshape(-1,10,10,1)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
res = sess.run(labels, feed_dict={inp_heuristics: samples})
res
np.rollaxis(samples[3],-1, 0)
def create_samples(sess, placeholder, n=100):
samples = (np.random.uniform(size = [n, 10,10]) < .3).astype(float).reshape(-1,10,10,1)
lbls = sess.run(labels, feed_dict={placeholder: samples})
return samples, lbls
```
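As a cross-check on the heuristic labelling, here is a small pure-NumPy version of the same rule: scan every horizontal, vertical and diagonal window of 5 cells and flag the board if any of them is fully occupied. It is an illustrative addition, not part of the original notebook, and on the boards in `samples` it should agree with the labels produced by the TensorFlow graph above.
```
def has_line_of_5(board):
    """Return 1.0 if any horizontal, vertical or diagonal line of 5 is fully occupied."""
    b = board.reshape(10, 10)
    for r in range(10):
        for c in range(10):
            if c + 5 <= 10 and b[r, c:c + 5].sum() == 5:                  # horizontal
                return 1.0
            if r + 5 <= 10 and b[r:r + 5, c].sum() == 5:                  # vertical
                return 1.0
            if r + 5 <= 10 and c + 5 <= 10:
                if sum(b[r + i, c + i] for i in range(5)) == 5:           # main diagonal
                    return 1.0
                if sum(b[r + i, c + 4 - i] for i in range(5)) == 5:       # anti-diagonal
                    return 1.0
    return 0.0

numpy_labels = np.array([has_line_of_5(s) for s in samples])
print(numpy_labels)  # should match the labels returned by the TensorFlow graph above
```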
### Combining ResNet and Inception Concepts
#### Design considerations
In contrast to DeepMind's network, I'm also using 5x5 filters in an inception [Ref] manner. I use blocks of 3 inception layers with skip connections between the blocks. The skip connections are 1x1 convolutions with 2 filters, so each block's output is mapped to a 2-channel feature map that bypasses the subsequent block and is concatenated with that block's output. I'm using a single batch-normalization layer at the end of each block because I accept more risk of overfitting in favour of reducing noise: Gomoku is not about image recognition, and the risk of overfitting is countered by the zillions of synthetically created boards anyway.
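To make the channel bookkeeping explicit (the numbers follow from the `_res_block` implementation below and are only a small illustrative aside, not part of the original notebook): each of the three inception layers concatenates a 3x3 branch and a 5x5 branch with `filters` channels each, and the block output concatenates a 2-channel 1x1 projection of the block input with the batch-normalised result of the third layer.
```
filters = 16                              # default value used in _res_block below
inception_channels = 2 * filters          # 3x3 branch concatenated with the 5x5 branch
block_channels = inception_channels + 2   # plus the 2-channel 1x1 skip of the block input
print(block_channels)                     # 34 channels handed to the next block
```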
```
class ResNet:
"""
After sufficient training, this instance of ResNet takes an array of dimensions 10x10
and returns 1 if the array contains the pattern you taught it to recognize.
"""
def __init__(self, n_blocks):
self.inps = tf.placeholder(name="inp_resnet", shape=[None, 10, 10, 1], dtype=tf.float32)
self.lbls = tf.placeholder(name="lbl_resnet", shape=[None, 1], dtype=tf.float32)
out = self.inps
for i in range(n_blocks):
out = self._res_block(out)
out = tf.layers.conv2d(kernel_size=1, filters=1, inputs=out, padding='same', activation='sigmoid')
self.out = tf.reshape(tf.layers.max_pooling2d(inputs=out, pool_size=10, strides=1), [-1, 1])
self.errors = (self.lbls - self.out)**2
self.accuracy=tf.reduce_sum(tf.cast(self.errors < .1, dtype=tf.int64))
self.loss = tf.losses.mean_squared_error(self.out, self.lbls)
self.optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
self.trainer = self.optimizer.minimize(self.loss)
def _res_block(self, inp, filters=16, activation='elu'):
out1_3 = tf.layers.conv2d(kernel_size=3, filters=filters, inputs=inp, padding='same', activation=activation)
out1_5 = tf.layers.conv2d(kernel_size=5, filters=filters, inputs=inp, padding='same', activation=activation)
out1 = tf.concat([out1_3, out1_5], axis=3)
out2_3 = tf.layers.conv2d(kernel_size=3, filters=filters, inputs=out1, padding='same', activation=activation)
out2_5 = tf.layers.conv2d(kernel_size=5, filters=filters, inputs=out1, padding='same', activation=activation)
out2 = tf.concat([out2_3, out2_5], axis=3)
out3_3 = tf.layers.conv2d(kernel_size=3, filters=filters, inputs=out2, padding='same', activation=activation)
out3_5 = tf.layers.conv2d(kernel_size=5, filters=filters, inputs=out2, padding='same', activation=activation)
out3 = tf.concat([out3_3, out3_5], axis=3)
bn = tf.layers.batch_normalization(inputs=out3)
skip = tf.layers.conv2d(kernel_size=1, filters=2, inputs=inp, padding='same', activation=None)
return tf.concat([skip, bn], axis=3)
resnet = ResNet(10)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for i in range(40001):
smp, lbl = create_samples(sess, inp_heuristics, 500)
lbl = lbl.reshape([-1, 1])
l, o, _ = sess.run([resnet.loss, resnet.out, resnet.trainer], feed_dict={resnet.lbls: lbl, resnet.inps: smp})
if i % 1000 == 0:
print("training loss %s: " % l)
smp, lbl = create_samples(sess, inp_heuristics, 1000)
lbl = lbl.reshape([-1, 1])
acc, pred, err = sess.run([resnet.accuracy, resnet.out, resnet.errors], feed_dict={resnet.inps: smp, resnet.lbls: lbl})
print("Accuracy %s" % acc)
```
|
github_jupyter
|
| 0.817028 | 0.855972 |
# Utility functions and risk aversion
<img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/6/62/Risk_down_arrow.png" width="400px" height="400px" />
In the previous module we learned
- what a portfolio is and how to measure its expected return and volatility;
- that a portfolio of risky assets carries less risk than the weighted sum of the individual risks,
- and that this is achieved through the concept of diversification;
- that diversification eliminates idiosyncratic risk, which is the risk affecting each company in particular,
- while market risk cannot be eliminated, because it affects everyone alike.
- Finally, we learned key concepts such as the minimum-variance frontier, efficient portfolios and the minimum-variance portfolio, which are central to the optimal portfolio selection problem.
Very well. However, to state the optimal portfolio selection problem we still need to define the function we are going to optimize: the utility function.
**Objectives:**
- How do we make decisions, according to economists?
- How do investors make decisions?
- What are utility functions?
*Reference:*
- Lecture notes of the course "Portfolio Selection and Risk Management", Rice University, available on Coursera.
___
## 1. Introduction
Economic theory starts from a very important assumption:
- **each individual acts to obtain the greatest possible benefit with the available resources**.
- In other words, **each individual maximizes their own utility**.
What is utility?
- It is a concept related to happiness, but broader.
- For example, I get utility from brushing my teeth or eating healthy food. Neither of them brings me happiness, but the former keeps my teeth healthy and, in the long run, the latter will probably contribute to a good old age.
Economists do not really care about what gives us utility, only that each of us has our own preferences.
- For example, I like coffee, football, dogs, academia and travelling, among other things.
- You have your own preferences as well.
Life is complex and full of uncertainty. We must make decisions at every moment, and these decisions involve certain trade-offs.
- For example, we usually face a trade-off between utility today and utility in the future.
- We must balance our consumption today against our consumption later.
- For example, you spend about four hours a week coming to portfolio classes because you expect this to improve your standard of living in the future.
So economists say that each individual behaves like the following optimizer:
\begin{align}
\max & \quad\text{Utility}\\
\text{s. t.} & \quad\text{Available resources}
\end{align}
What does all of this have to do with the course?
- In this module we will develop tools to describe investors' preferences when they face risk-return decisions.
- We will see how to measure the attitude towards risk: how much do you like or dislike risk?
- Finally, we will see how to formulate the problem of maximizing an investor's utility in order to make the optimal investment decision.
___
## 2. Utility functions.
How do we make decisions?
For example:
- You have to decide whether to come to class, stay at home watching Netflix, or go to the gym.
- You have to decide between partying every weekend or saving up to go on vacation.
In the case of a portfolio, the decision to be made is: **how much risk are you willing to take, and for what amount of return?**
**How would you evaluate the trade-off between holding cetes (treasury bills) versus a very risky strategy with a potentially very high return?**
So we will look at how we make decisions when we face different possibilities. Specifically, we will talk about **preferences** and how economists use those preferences to explain decisions and the trade-offs involved in those decisions.
We use **preferences** to describe the decisions we make. Preferences tell us how an individual evaluates the trade-offs between different choices.
By definition, preferences are unique to each individual. In the portfolio selection problem:
- the preferences that dictate how much risk you are willing to take for how much return are specific to each one of you.
- Your answers to that question can be very different, because we have different preferences.
Now, we cannot *quantify* those preferences directly.
- That is why we use the concept of utility: to measure how satisfied an individual is with their choices.
- So we can think of utility as a numerical indicator that describes preferences,
- or an index that helps us rank different decisions.
- In simple terms, **utility helps us translate the notion of how you feel into numbers**;
- the more utility, the better you feel.
**Utility function**: a systematic way of assigning a numerical measure or indicator in order to rank different choices.
The number produced by a utility function has no meaning by itself. It is simply a way of ranking different decisions.
**Example.**
We can write an investor's utility as a function of wealth,
$$U(W).$$
- $U(W)$ gives us a measure of how satisfied we are with the level of wealth we have.
- $U(W)$ is not wealth itself; the utility function translates the amount of wealth into a subjective numerical index.
What would a utility-of-wealth function $U(W)$ look like graphically? (A small sketch follows the list below.)
<font color=blue> See on the whiteboard </font>
- What characteristics should it have?
- What is its first derivative like?
- What is its second derivative like?
- Good times: high wealth (what is the first derivative like here?)
- Bad times: low wealth (what is the first derivative like here?)
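As an illustrative sketch (the logarithm is just one concave choice among many, and wealth is assumed strictly positive), the following code plots a utility curve that is increasing ($U'>0$) and has diminishing marginal utility ($U''<0$), which is exactly the shape discussed above:
```
import numpy as np
import matplotlib.pyplot as plt

W = np.linspace(1, 100, 200)   # wealth levels (strictly positive)
U = np.log(W)                  # one possible concave utility: U(W) = ln(W)

plt.plot(W, U)
plt.xlabel('Wealth $W$')
plt.ylabel('Utility $U(W)$')
plt.title('Concave utility: increasing, with diminishing marginal utility')
plt.grid(True)
plt.show()
```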
## 3. Risk aversion
An important dimension of decision-making in finance and economics is **uncertainty**. There is probably no decision in economics that does not involve risk.
- Most people do not like risk very much.
- In fact, studies of human behaviour in the face of risk strongly suggest that human beings are risk averse.
- For example, most households hold insurance on their assets.
- Thus, when we state the optimal portfolio selection problem, we assume that the investor is risk averse.
What does this mean in terms of preferences? How do we measure it?
- As human beings, we all have different genes and preferences, and this also applies to the attitude towards risk.
- Therefore, risk aversion is key to how we describe an investor's preferences.
- Individuals with a high degree of risk aversion will pay a high price for safety, while others will not.
- So someone with high risk aversion will not want to face a situation with an uncertain outcome, and will be willing to pay a large insurance premium to eliminate that risk.
- Equivalently, a person with high risk aversion will require high compensation in order to take on that risk.
The **degree of risk aversion** measures how strongly an investor prefers a certain outcome to an uncertain one.
The opposite of risk aversion is **risk tolerance**.
<font color=blue> See on the whiteboard, graphically, how risk aversion is explained through utility functions. </font>
**Conclusion:** the concavity of the utility function dictates how risk averse the individual is.
### How do we measure an individual's degree of risk aversion?
Do you know your own risk aversion coefficient? We can estimate it.
Suppose you can take part in the following lottery:
- you can win $\$1000$ with $50\%$ probability, or
- you can win $\$500$ with $50\%$ probability.
That is, you are guaranteed $\$500$ from the start, but you also have the chance of winning $\$1000$.
How much would you be willing to pay for this opportunity?
Well, we can relate your answer to your risk aversion coefficient.
| Risk aversion coefficient | Amount you would pay |
| --------------------------------- | --------------------- |
| 0 | 750 |
| 0.5 | 729 |
| 1 | 707 |
| 2 | 667 |
| 3 | 632 |
| 4 | 606 |
| 5 | 586 |
| 10 | 540 |
| 15 | 525 |
| 20 | 519 |
| 50 | 507 |
Most people are willing to pay between $\$540$ (coefficient 10) and $\$707$ (coefficient 1). It is very rare to find risk aversion coefficients below 1. This is supported by a large number of surveys.
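The numbers in the table can be reproduced with a power (CRRA) utility function; this functional form is an assumption used here only for illustration. The willingness to pay is the certainty equivalent $CE = U^{-1}\left(0.5\,U(1000) + 0.5\,U(500)\right)$:
```
import numpy as np

def certainty_equivalent(gamma, outcomes=(1000.0, 500.0), probs=(0.5, 0.5)):
    """Certainty equivalent of the lottery under CRRA utility
    U(W) = W**(1 - gamma) / (1 - gamma); log utility when gamma == 1."""
    outcomes = np.asarray(outcomes, dtype=float)
    probs = np.asarray(probs, dtype=float)
    if np.isclose(gamma, 1.0):
        return np.exp(np.sum(probs * np.log(outcomes)))
    eu = np.sum(probs * outcomes**(1 - gamma) / (1 - gamma))
    return ((1 - gamma) * eu)**(1 / (1 - gamma))

for gamma in [0, 0.5, 1, 2, 3, 4, 5, 10, 15, 20, 50]:
    print(f'gamma = {gamma:>4}: willing to pay ~ {certainty_equivalent(gamma):.0f}')
```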
- In the financial world, financial advisors use questionnaires to measure clients' risk aversion coefficients.
**Example.** Describe, in terms of risk aversion, the following utility functions that I will draw on the whiteboard.
___
# Announcements
## 1. Quiz next class.
## 2. Exam on Modules 1 and 2: Tuesday, October 8.
## 3. Remember: Homework 5 is due Tuesday, October 1.
<script>
$(document).ready(function(){
$('div.prompt').hide();
$('div.back-to-top').hide();
$('nav#menubar').hide();
$('.breadcrumb').hide();
$('.hidden-print').hide();
});
</script>
<footer id="attribution" style="float:right; color:#808080; background:#fff;">
Created with Jupyter by Esteban Jiménez Rodríguez.
</footer>
|
github_jupyter
|
| 0.453262 | 0.930836 |
# PTQ 与 QAT 实践
本文主要介绍如何使用 PyTorch 将浮点模型转换为 PTQ 或者 QAT 模型。
## 背景
{guilabel}`目标`:快速将浮点模型转换为 PTQ 或者 QAT 模型。
### 读者
本教程适用于会使用 PyTorch 编写 CNN 等模块的的算法工程师。
### 环境配置
本文使用 Python 3.10.0(其他版本请自测),目前仅在 Linux 平台上测试过。
查看 `torch` 和 `torchvision` 的版本:
```
import torch
import torchvision
print(f'torch: {torch.__version__} \n'
f'torchvision: {torchvision.__version__}')
```
设置一些警告配置:
```
# 设置 warnings
import warnings
warnings.filterwarnings(
action='ignore',
category=DeprecationWarning,
module='.*'
)
warnings.filterwarnings(
action='ignore',
module='torch.ao.quantization'
)
```
## 概述:PQT 与 QAT
参考:[量化](https://pytorch.org/docs/master/quantization.html)
`训练后量化`
: 简称 PTQ(Post Training Quantization):权重量化,激活量化,需要借助数据在训练后进行校准。
`静态量化感知训练`
: 简称 QAT(static quantization aware training):权重量化,激活量化,在训练过程中的量化数值进行建模。
`浮点模型`
: 模型的 **权重** 和 **激活** 均为浮点类型(如 {data}`torch.float32`, {data}`torch.float64`)。
`量化模型`
: 模型的 **权重** 和 **激活** 均为量化类型(如 {data}`torch.qint32`, {data}`torch.qint8`, {data}`torch.quint8`, {data}`torch.quint2x4`, {data}`torch.quint4x2`)。
下面举例说明如何将浮点模型转换为量化模型。
为了方便说明定义如下模块:
```{rubric} 定义简单的浮点模块
```
```
from torch import nn, Tensor
class M(nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.relu = torch.nn.ReLU()
def _forward_impl(self, x: Tensor) -> Tensor:
'''提供便捷函数'''
x = self.conv(x)
x = self.relu(x)
return x
def forward(self, x: Tensor) -> Tensor:
x= self._forward_impl(x)
return x
```
```{rubric} 定义可量化模块
```
将浮点模块 `M` 转换为可量化模块 `QM`(量化流程的最关键的一步)。
```
from torch.ao.quantization import QuantStub, DeQuantStub
class QM(M):
'''
Args:
is_print: 为了测试需求,打印一些信息
'''
def __init__(self, is_print: bool=False):
super().__init__()
self.is_print = is_print
self.quant = QuantStub() # 将张量从浮点转换为量化
self.dequant = DeQuantStub() # 将张量从量化转换为浮点
def forward(self, x: Tensor) -> Tensor:
# 手动指定张量将在量化模型中从浮点模块转换为量化模块的位置
x = self.quant(x)
if self.is_print:
print('量化前的类型:', x.dtype)
x = self._forward_impl(x)
if self.is_print:
print('量化中的类型:',x.dtype)
# 在量化模型中手动指定张量从量化到浮点的转换位置
x = self.dequant(x)
if self.is_print:
print('量化后的类型:', x.dtype)
return x
```
简单测试前向过程的激活数据类型:
```
input_fp32 = torch.randn(4, 1, 4, 4) # 输入的数据
m = QM(is_print=True)
x = m(input_fp32)
```
查看权重的数据类型:
```
m.conv.weight.dtype
```
可以看出,此时模块 `m` 是浮点模块。
### PTQ 简介
当内存带宽和计算空间都很重要时,通常会使用训练后量化,而 CNN 就是其典型的用例。训练后量化对模型的 **权重** 和 **激活** 进行量化。它在可能的情况下将 **激活** 融合到前面的层中。它需要用具有代表性的数据集进行 **校准**,以确定激活的最佳量化参数。
```{rubric} 示意图
```
```
# 原始模型
# 全部的张量和计算均在浮点上进行
previous_layer_fp32 -- linear_fp32 -- activation_fp32 -- next_layer_fp32
/
linear_weight_fp32
# 静态量化模型
# weights 和 activations 在 int8 上
previous_layer_int8 -- linear_with_activation_int8 -- next_layer_int8
/
linear_weight_int8
```
直接创建浮点模块的实例:
```
# 创建浮点模型实例
model_fp32 = QM(is_print=True)
```
要使 PTQ 生效,必须将模型设置为 `eval` 模式:
```
model_fp32.eval()
```
查看此时的数据类型:
```
input_fp32 = torch.randn(4, 1, 4, 4)
x = model_fp32(input_fp32)
print('激活和权重的数据类型分别为:'
f'{x.dtype}, {model_fp32.conv.weight.dtype}')
```
```{rubric} 定义观测器
```
赋值实例变量 `qconfig`,其中包含关于要附加哪种观测器的信息:
- 使用 [`'fbgemm'`](https://github.com/pytorch/FBGEMM) 用于带 AVX2 的 x86(没有AVX2,一些运算的实现效率很低);使用 [`'qnnpack'`](https://github.com/pytorch/pytorch/tree/master/aten/src/ATen/native/quantized/cpu/qnnpack) 用于 ARM CPU(通常出现在移动/嵌入式设备中)。
- 其他量化配置,如选择对称或非对称量化和 `MinMax` 或 `L2Norm` 校准技术,可以在这里指定。
```
model_fp32.qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')
```
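作为补充示意(假设使用 `torch.ao.quantization` 中的 `QConfig`、`HistogramObserver` 与 `PerChannelMinMaxObserver`,这只是众多组合中的一种),也可以手动组合观测器来构造自定义配置,例如激活用直方图观测器、权重用逐通道对称量化(对应下文表格中精度更高的那组配置):
```
from torch.ao.quantization import QConfig
from torch.ao.quantization.observer import HistogramObserver, PerChannelMinMaxObserver

custom_qconfig = QConfig(
    activation=HistogramObserver.with_args(quant_min=0, quant_max=127),
    weight=PerChannelMinMaxObserver.with_args(dtype=torch.qint8,
                                              qscheme=torch.per_channel_symmetric),
)
# model_fp32.qconfig = custom_qconfig  # 用自定义配置替换默认的 'fbgemm' 配置
```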
查看此时的数据类型:
```
input_fp32 = torch.randn(4, 1, 4, 4)
x = model_fp32(input_fp32)
print('激活和权重的数据类型分别为:'
f'{x.dtype}, {model_fp32.conv.weight.dtype}')
```
```{rubric} 融合激活层
```
在适用的地方,融合 activation 到前面的层(这需要根据模型架构手动完成)。常见的融合包括 `conv + relu` 和 `conv + batchnorm + relu`。
```
model_fp32_fused = torch.ao.quantization.fuse_modules(model_fp32,
[['conv', 'relu']])
model_fp32_fused
```
可以看到 `model_fp32_fused` 中 `ConvReLU2d` 融合 `model_fp32` 的两个层 `conv` 和 `relu`。
查看此时的数据类型:
```
input_fp32 = torch.randn(4, 1, 4, 4)
x = model_fp32_fused(input_fp32)
print('激活和权重的数据类型分别为:'
f'{x.dtype}, {model_fp32.conv.weight.dtype}')
```
```{rubric} 启用观测器
```
在融合后的模块中启用观测器,用于在校准期间观测激活(activation)张量。
```
model_fp32_prepared = torch.quantization.prepare(model_fp32_fused)
```
```{rubric} 校准准备好的模型
```
用具有代表性的数据集校准准备好的模型,以确定激活在真实场景下的量化参数。
```
input_fp32 = torch.randn(4, 1, 4, 4)
x = model_fp32_prepared(input_fp32)
print('激活和权重的数据类型分别为:'
f'{x.dtype}, {model_fp32.conv.weight.dtype}')
```
```{rubric} 模型转换
```
```{note}
量化权重,计算和存储每个激活张量要使用的尺度(scale)和偏差(bias)值,并用量化实现替换关键算子。
```
转换已校准好的模型为量化模型:
```
model_int8 = torch.quantization.convert(model_fp32_prepared)
model_int8
```
查看权重的数据类型:
```
model_int8.conv.weight().dtype
```
可以看出此时权重的元素大小为 1 字节,而不是 FP32 的 4 字节:
```
model_int8.conv.weight().element_size()
```
运行模型,相关的计算将在 {data}`torch.qint8` 中发生。
```
res = model_int8(input_fp32)
res.dtype
```
要了解更多关于量化意识训练的信息,请参阅 [QAT 教程](https://pytorch.org/tutorials/advanced/static_quantization_tutorial.html)。
### QAT 概述
与其他量化方法相比,QAT 在 **训练过程中** 模拟量化的效果,可以获得更高的 accuracy。在训练过程中,所有的计算都是在浮点上进行的,使用 fake_quant 模块通过夹紧和舍入的方式对量化效果进行建模,模拟 INT8 的效果。模型转换后,权值和激活被量化,激活在可能的情况下被融合到前一层。它通常与 CNN 一起使用,与 PTQ 相比具有更高的 accuracy。
```{rubric} 示意图
```
```
# 原始模型
# 全部张量和计算均在浮点上
previous_layer_fp32 -- linear_fp32 -- activation_fp32 -- next_layer_fp32
/
linear_weight_fp32
# 在训练过程中使用 fake_quants 建模量化数值
previous_layer_fp32 -- fq -- linear_fp32 -- activation_fp32 -- fq -- next_layer_fp32
/
linear_weight_fp32 -- fq
# 量化模型
# weights 和 activations 在 int8 上
previous_layer_int8 -- linear_with_activation_int8 -- next_layer_int8
/
linear_weight_int8
```
定义比 `M` 稍微复杂一点的浮点模块:
```
class M2(M):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(1)
def _forward_impl(self, x: Tensor) -> Tensor:
'''提供便捷函数'''
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
```
同样需要定义可量化模块:
```
class QM2(M2, QM):
def __init__(self):
super().__init__()
```
创建浮点模型实例:
```
# 创建模型实例
model_fp32 = QM2()
model_fp32
```
模型必须设置为训练模式,以便 QAT 可用:
```
model_fp32.train();
```
添加量化配置(与 PTQ 相似):
```
model_fp32.qconfig = torch.ao.quantization.get_default_qat_qconfig('fbgemm')
```
```{rubric} 融合 QAT 模块
```
QAT 的模块融合与 PTQ 相似:
```
from torch.ao.quantization import fuse_modules_qat
model_fp32_fused = fuse_modules_qat(model_fp32,
[['conv', 'bn', 'relu']])
```
```{rubric} 准备 QAT 模型
```
这将在模型中插入观测者和伪量化模块,它们将在校准期间观测权重和激活的张量。
```
model_fp32_prepared = torch.quantization.prepare_qat(model_fp32_fused)
```
```{rubric} 训练 QAT 模型
```
```python
# 下文会编写实际的例子,此处没有显示
training_loop(model_fp32_prepared)
```
将观测到的模型转换为量化模型。需要:
- 量化权重,计算和存储用于每个激活张量的尺度(scale)和偏差(bias)值,
- 在适当的地方融合模块,并用量化实现替换关键算子。
```
model_fp32_prepared.eval()
model_int8 = torch.quantization.convert(model_fp32_prepared)
```
运行模型,相关的计算将在 {data}`torch.qint8` 中发生。
```
res = model_int8(input_fp32)
```
要了解更多关于量化意识训练的信息,请参阅 [QAT 教程](https://pytorch.org/tutorials/advanced/static_quantization_tutorial.html)。
### PTQ/QAT 统一的量化流程
PTQ 和 QAT 的量化流程十分相似,为了统一接口,可以使用 `torchvision` 提供的函数 {func}`~torchvision.models.quantization.utils._fuse_modules`。
下面利用函数 {func}`~torchvision.models.quantization.utils._fuse_modules` 改写可量化模块 `QM2`,得到统一接口的 `QM3`。
```
from typing import Any
from torch.ao.quantization import fuse_modules, fuse_modules_qat
from torch.ao.quantization import get_default_qconfig, get_default_qat_qconfig
from torch.ao.quantization import quantize, quantize_qat
def _fuse_modules(
model: nn.Module, modules_to_fuse: list[str] | list[list[str]], is_qat: bool | None, **kwargs: Any
):
if is_qat is None:
is_qat = model.training
method = fuse_modules_qat if is_qat else fuse_modules
return method(model, modules_to_fuse, **kwargs)
class QM3(QM2):
'''可量化模型
Args:
is_qat: 是否使用 QAT 模式
'''
def __init__(self, is_qat: bool | None = None, backend='fbgemm'):
super().__init__()
self.is_qat = is_qat
# 定义观测器
if is_qat:
self.train()
self.qconfig = get_default_qat_qconfig(backend)
else:
self.eval()
self.qconfig = get_default_qconfig(backend)
def fuse_model(self) -> None:
'''模块融合'''
if self.is_qat:
modules_to_fuse = ['bn', 'relu']
else:
modules_to_fuse = ['conv', 'bn', 'relu']
return _fuse_modules(self,
modules_to_fuse,
self.is_qat,
inplace=True)
```
有了可量化模块 `QM3`,可以十分便利的切换 PTQ/QAT了。
比如,PTQ,可以这样:
```
def run_fn(model, num_epochs):
for _ in range(num_epochs):
input_fp32 = torch.randn(4, 1, 4, 4)
model(input_fp32)
num_epochs = 10
ptq_model = QM3(is_qat=False)
model_fused = ptq_model.fuse_model()
quanted_model = quantize(model_fused, run_fn, [num_epochs])
```
QAT 可以这样:
```
num_epochs = 10
qat_model = QM3(is_qat=True)
model_fused = qat_model.fuse_model()
quanted_model = quantize_qat(model_fused, run_fn, [num_epochs])
```
### PTQ/QAT 量化策略
对于通用量化技术,需要了解:
1. 将任何需要输出再量化请求的运算(因此有额外的参数)从函数形式转换为模块形式(例如,使用 {class}`torch.nn.ReLU` 而不是 {func}`torch.nn.functional.relu`)。
1. 通过在子模块上指定 `.qconfig` 属性或指定 `qconfig_dict` 来指定模型的哪些部分需要量化。例如,设置 `model.conv1.qconfig = None` 表示 `model.conv1` 层不量化,设置 `model.linear1.qconfig = custom_qconfig` 表示 `model.linear1` 将使用 `custom_qconfig` 而不是全局 `qconfig`。
对于量化激活的静态量化技术(即对模型的权重和激活均进行量化,包括 PTQ 和 QAT),用户还需要做以下工作:
1. 指定量化和反量化激活的位置。这是使用 {class}`~torch.ao.quantization.stubs.QuantStub` 和 {class}`~torch.ao.quantization.stubs.DeQuantStub` 模块完成的。
1. 使用 {class}`~torch.nn.quantized.FloatFunctional` 将需要对量化进行特殊处理的张量运算封装到模块中。例如像 {func}`add` 和 {func}`cat` 这样需要特殊处理来确定输出量化参数的运算。
1. 融合模块:将运算/模块组合成单个模块,获得更高的 accuracy 和性能。这是使用 {func}`~torch.ao.quantization.fuse_modules.fuse_modules` API 完成的,该 API 接受要融合的模块列表。目前支持以下融合:`[Conv, Relu]`、 `[Conv, BatchNorm]`、 `[Conv, BatchNorm, Relu]` 和 `[Linear, Relu]`。
示例:
```{figure} images/resnet.png
:align: center
:class: w3-border
倒置残差块的转换前后对比
```
## PTQ 和 QAT 实战
```{rubric} 模型对比
```
类型|大小(MB)|accuracy($\%$)
:-|:-|:-
浮点|9.188|94.91
浮点融合|8.924|94.91
QAT|2.657|94.41
```{rubric} 不同 QConfig 的静态 PTQ 模型
```
accuracy($\%$)|激活|权重
:-|:-|:-
51.11|{data}`~torch.ao.quantization.observer.MinMaxObserver`.`with_args(quant_min=0, quant_max=127)`|{data}`~torch.ao.quantization.observer.MinMaxObserver`.`with_args(dtype=torch.qint8, qscheme=torch.per_tensor_symmetric)`
80.42|{data}`~torch.ao.quantization.observer.HistogramObserver`.`with_args(quant_min=0, quant_max=127)`|{data}`~torch.ao.quantization.observer.PerChannelMinMaxObserver`.`with_args(dtype=torch.qint8, qscheme=torch.per_channel_symmetric)`
为了提供一致的量化工具接口,我们使用 Python 包 `torchq`。
本地载入临时 `torchq` 包:
```
from mod import torchq
```
```{tip}
本文使用 `torchq` 的 `'0.0.1-alpha'` 版本。
```
更方便的是:使用 `pip` 安装:
```shell
pip install torchq==0.0.1-alpha
```
接着,便可以直接导入:
```python
import torchq
```
```{tip}
本文使用 `torchq` 的 `'0.0.1-alpha'` 版本。
```
可以看出 PTQ 和 QAT 需要用户自定义的内容主要集中在: **模块融合** 和 **算子替换**。
{func}`~torchvision.models.quantization.utils._fuse_modules` 提供了 {func}`~torch.ao.quantization.fuse_modules.fuse_modules` 和 {func}`~torch.ao.quantization.fuse_modules.fuse_modules_qat` 的统一接口。下面以 MobileNetV2 为例,简述如何使用 {func}`~torchvision.models.quantization.utils._fuse_modules` 函数和 {class}`~torch.nn.quantized.FloatFunctional` 类定制可量化的模块。
```
'''参考 torchvision/models/quantization/mobilenetv2.py
'''
from typing import Any
from torch import Tensor
from torch import nn
from torchvision._internally_replaced_utils import load_state_dict_from_url
from torchvision.ops.misc import ConvNormActivation
from torchvision.models.quantization.utils import _fuse_modules, _replace_relu, quantize_model
from torch.ao.quantization import QuantStub, DeQuantStub
from torchvision.models.mobilenetv2 import InvertedResidual, MobileNetV2, model_urls
quant_model_urls = {
"mobilenet_v2_qnnpack": "https://download.pytorch.org/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth"
}
class QuantizableInvertedResidual(InvertedResidual):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.skip_add = nn.quantized.FloatFunctional()
def forward(self, x: Tensor) -> Tensor:
if self.use_res_connect:
return self.skip_add.add(x, self.conv(x))
else:
return self.conv(x)
def fuse_model(self, is_qat: bool | None = None) -> None:
for idx in range(len(self.conv)):
if type(self.conv[idx]) is nn.Conv2d:
_fuse_modules(self.conv,
[str(idx),
str(idx + 1)],
is_qat,
inplace=True)
class QuantizableMobileNetV2(MobileNetV2):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""
MobileNet V2 main class
Args:
继承自浮点 MobileNetV2 的参数
"""
super().__init__(*args, **kwargs)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x: Tensor) -> Tensor:
x = self.quant(x)
x = self._forward_impl(x)
x = self.dequant(x)
return x
def fuse_model(self, is_qat: bool | None=None) -> None:
for m in self.modules():
if type(m) is ConvNormActivation:
_fuse_modules(m, ["0", "1", "2"], is_qat, inplace=True)
if type(m) is QuantizableInvertedResidual:
m.fuse_model(is_qat)
def mobilenet_v2(
pretrained: bool = False,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableMobileNetV2:
"""
从 `MobileNetV2:反向残差和线性瓶颈 <https://arxiv.org/abs/1801.04381>`_ 构建 MobileNetV2 架构。
注意,quantize = True 返回具有 8 bit 权值的量化模型。量化模型只支持推理并在 CPU 上运行。
目前还不支持 GPU 推理
Args:
pretrained (bool): 如果为 True,返回在 ImageNet 上训练过的模型。
progress (bool): 如果为 True,则显示下载到标准错误的进度条
quantize(bool): 如果为 True,则返回量化模型,否则返回浮点模型
"""
model = QuantizableMobileNetV2(block=QuantizableInvertedResidual, **kwargs)
_replace_relu(model)
if quantize:
# TODO use pretrained as a string to specify the backend
backend = "qnnpack"
quantize_model(model, backend)
else:
assert pretrained in [True, False]
if pretrained:
if quantize:
model_url = quant_model_urls["mobilenet_v2_" + backend]
else:
model_url = model_urls["mobilenet_v2"]
state_dict = load_state_dict_from_url(model_url, progress=progress)
model.load_state_dict(state_dict)
return model
```
### 一些准备工作
下面以 Cifar10 为例来说明 PTQ/QAT 的量化流程。
定义几个[辅助函数](https://github.com/pytorch/examples/blob/master/imagenet/main.py)来帮助评估模型。
```
from torchq.helper import evaluate, print_size_of_model, load_model
```
设置超参数:
```
saved_model_dir = 'models/'
float_model_file = 'mobilenet_pretrained_float.pth'
scripted_float_model_file = 'mobilenet_float_scripted.pth'
scripted_ptq_model_file = 'mobilenet_ptq_scripted.pth'
scripted_quantized_model_file = 'mobilenet_quantization_scripted_quantized.pth'
scripted_qat_model_file = 'mobilenet_qat_scripted_quantized.pth'
learning_rate = 5e-5
num_epochs = 30
batch_size = 16
num_classes = 10
# 设置评估策略
criterion = nn.CrossEntropyLoss()
```
定义数据集和数据加载器:
```
from torchq.xinet import CV
# 为了 cifar10 匹配 ImageNet,需要将其 resize 到 224
train_iter, test_iter = CV.load_data_cifar10(batch_size=batch_size,
resize=224)
```
查看数据集的 batch 次数:
```
print('训练、测试批次分别为:',
len(train_iter), len(test_iter))
```
获取训练和测试数据集的大小:
```
num_train = sum(len(ys) for _, ys in train_iter)
num_eval = sum(len(ys) for _, ys in test_iter)
num_train, num_eval
```
### 微调浮点模型
配置浮点模型:
```
#from torchvision.models.quantization import mobilenet_v2
# 定义模型
def create_model(quantize=False,
num_classes=10,
pretrained=False):
float_model = mobilenet_v2(pretrained=pretrained,
quantize=quantize)
# 匹配 ``num_classes``
float_model.classifier[1] = nn.Linear(float_model.last_channel,
num_classes)
return float_model
```
定义模型:
```
float_model = create_model(pretrained=True,
quantize=False,
num_classes=num_classes)
```
定义微调的函数 {class}`torchq.xinet.CV`.{func}`train_fine_tuning` 用于模型。
微调浮点模型:
```
CV.train_fine_tuning(float_model, train_iter, test_iter,
learning_rate=learning_rate,
num_epochs=num_epochs,
device='cuda:2',
param_group=True)
```
保存模型:
```
torch.save(float_model.state_dict(), saved_model_dir + float_model_file)
```
### 配置可量化模型
加载浮点模型:
```
float_model = create_model(quantize=False,
num_classes=num_classes)
float_model = load_model(float_model, saved_model_dir + float_model_file)
```
查看浮点模型的信息:
```
def print_info(model,
model_type='浮点模型',
test_iter=test_iter,
criterion=criterion, num_eval=num_eval):
'''打印信息'''
print_size_of_model(model)
top1, top5 = evaluate(model, criterion, test_iter)
print(f'\n{model_type}:\n\t'
f'在 {num_eval} 张图片上评估 accuracy 为: {top1.avg:2.5f}')
print_info(float_model, model_type='浮点模型')
```
可以先查看融合前的 inverted residual 块:
```
float_model.features[1].conv
```
融合模块:
```
float_model.fuse_model(is_qat=None)
```
查看融合后的 inverted residual 块:
```
float_model.features[1].conv
```
为了得到“基线”精度,看看融合模块的非量化模型的精度:
```
model_type = '融合后的浮点模型'
print("baseline 模型大小")
print_size_of_model(float_model)
top1, top5 = evaluate(float_model, criterion, test_iter)
from torch import jit
print(f'\n{model_type}:\n\t在 {num_eval} 张图片上评估 accuracy 为: {top1.avg:2.2f}')
# 保存
jit.save(jit.script(float_model), saved_model_dir + scripted_float_model_file)
```
这将是我们进行比较的基准。接下来,尝试不同的量化方法。
### PTQ 实战
```
# 加载模型
myModel = create_model(pretrained=False,
quantize=False,
num_classes=num_classes)
float_model = load_model(myModel,
saved_model_dir + float_model_file)
myModel.eval()
# 融合
myModel.fuse_model()
```
指定量化配置(从简单的最小/最大范围估计和加权的逐张量量化开始):
```
from torch.ao.quantization.qconfig import default_qconfig
myModel.qconfig = default_qconfig
myModel.qconfig
```
开始校准准备:
```
from torch.ao.quantization.quantize import prepare
print('PTQ 准备:插入观测者')
prepare(myModel, inplace=True)
print('\n 查看观测者插入后的 inverted residual \n\n',
myModel.features[1].conv)
```
用数据集校准:
```
num_calibration_batches = 200 # 取部分训练集做校准
evaluate(myModel, criterion, train_iter, neval_batches=num_calibration_batches)
print('\nPTQ:校准完成!')
```
转换为量化模型:
```
from torch.ao.quantization.quantize import convert
convert(myModel, inplace=True)
print('PTQ:转换完成!')
```
融合并量化后,查看融合模块的 Inverted Residual 块:
```
myModel.features[1].conv
```
量化后的模型大小:
```
print_size_of_model(myModel)
```
评估:
```
model_type = 'PTQ 模型'
top1, top5 = evaluate(myModel, criterion, test_iter)
print(f'\n{model_type}:\n\t在 {num_eval} 张图片上评估 accuracy 为: {top1.avg:2.2f}')
# jit.save(jit.script(myModel), saved_model_dir + scripted_ptq_model_file)
```
使用了简单的 min/max 观测器来确定量化参数,将模型的大小减少到了 2.36 MB 以下,几乎减少了 4 倍。
此外,通过使用不同的量化配置来显著提高精度(对于量化 ARM 架构的推荐配置重复同样的练习)。该配置的操作如下:
- 在 per-channel 基础上量化权重
- 使用直方图观测器,收集激活的直方图,然后以最佳方式选择量化参数。
```
per_channel_quantized_model = create_model(quantize=False,
num_classes=num_classes)
per_channel_quantized_model = load_model(per_channel_quantized_model,
saved_model_dir + float_model_file)
per_channel_quantized_model.eval()
per_channel_quantized_model.fuse_model()
per_channel_quantized_model.qconfig = get_default_qconfig('fbgemm')
per_channel_quantized_model.qconfig
num_calibration_batches = 200 # 仅仅取 200 个批次
prepare(per_channel_quantized_model, inplace=True)
evaluate(per_channel_quantized_model, criterion,
train_iter, num_calibration_batches)
model_type = 'PTQ 模型(直方图观测器)'
convert(per_channel_quantized_model, inplace=True)
top1, top5 = evaluate(per_channel_quantized_model, criterion, test_iter)
print(f'\n{model_type}:\n\t在 {num_eval} 张图片上评估 accuracy 为: {top1.avg:2.2f}')
jit.save(jit.script(per_channel_quantized_model),
saved_model_dir + scripted_quantized_model_file)
```
仅仅改变这种量化配置方法,就可以将准确度提高到 $80.42\%$ 以上!尽管如此,这还是比 $95\%$ 的基线水平低了 $15\%$。
### QAT 实战
使用 QAT,所有的权值和激活都在前向和后向训练过程中被“伪量化”:也就是说,浮点值被舍入以模拟 int8 值,但所有的计算仍然使用浮点数完成。因此,训练过程中的所有权重调整都是在“感知到”模型最终将被量化的情况下进行的;因此,在量化之后,这种方法通常比动态量化或训练后的静态量化产生更高的精度。
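下面用一个小例子(仅作示意,假设 `torch.fake_quantize_per_tensor_affine` 可用)演示"伪量化"的效果:张量仍然是浮点类型,但数值已被舍入并截断到给定 scale 的量化网格上:
```
x = torch.randn(4)
# 参数依次为:输入、scale、zero_point、quant_min、quant_max
x_fq = torch.fake_quantize_per_tensor_affine(x, 0.1, 0, -128, 127)
print(x)     # 原始浮点值
print(x_fq)  # 仍为 float,但已是 0.1 的整数倍(并截断在 [-12.8, 12.7] 内)
```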
实际执行 QAT 的总体工作流程与之前非常相似:
- 可以使用与以前相同的模型:不需要为量化感知训练做额外的准备。
- 需要使用 `qconfig` 来指定在权重和激活之后插入何种类型的伪量化,而不是指定观测者。
```
def create_qat_model(num_classes,
model_path,
quantize=False,
backend='fbgemm'):
qat_model = create_model(quantize=quantize,
num_classes=num_classes)
qat_model = load_model(qat_model, model_path)
qat_model.fuse_model()
qat_model.qconfig = get_default_qat_qconfig(backend=backend)
return qat_model
```
最后,`prepare_qat` 执行“伪量化”,为量化感知训练准备模型:
```
from torch.ao.quantization.quantize import prepare_qat
model_path = saved_model_dir + float_model_file
qat_model = create_qat_model(num_classes, model_path)
qat_model = prepare_qat(qat_model)
```
Inverted Residual Block:准备好 QAT 后,注意伪量化模块:
```
qat_model.features[1].conv
```
训练具有高精确度的量化模型要求在推理时对数值进行精确的建模。因此,对于量化感知训练,我们对训练循环进行如下修改:
- 将批处理范数转换为训练结束时的运行均值和方差,以更好地匹配推理数值。
- 冻结量化器参数(尺度和零点)并微调权重。
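这两步通常在训练若干个 epoch 之后触发。作为参考(以下写法来自 PyTorch 官方静态量化教程;本文的 `CV.train_fine_tuning` 可能已通过 `is_freeze` 等参数封装了类似逻辑,这里仅作示意):
```
# 训练若干个 epoch 之后:
# 冻结 BatchNorm 的统计量,使训练数值更接近推理数值
qat_model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
# 冻结观测器(即量化器的 scale 与 zero_point),之后只微调权重
qat_model.apply(torch.ao.quantization.disable_observer)
```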
```
CV.train_fine_tuning(qat_model,
train_iter,
test_iter,
learning_rate=learning_rate,
num_epochs=30,
device='cuda:2',
param_group=True,
is_freeze=False,
is_quantized_acc=False,
need_qconfig=False,
ylim=[0.8, 1])
```
```{note}
这里的损失函数向上平移了 0.8 以提供更好的视觉效果。
```
由于量化模型暂时仅支持 CPU,故而需要先将模型转移到 CPU 并设为 eval 模式,再转换为量化版本:
```
convert(qat_model.cpu().eval(), inplace=True)
qat_model.eval();
print_info(qat_model,'QAT 模型')
```
量化感知训练在整个数据集上的准确率超过 $94.4\%$,接近浮点精度 $95\%$。
更多关于 QAT 的内容:
- QAT 是后训练量化技术的超集,允许更多的调试。例如,我们可以分析模型的准确性是否受到权重或激活量化的限制。
- 也可以在浮点上模拟量化模型的准确性,因为使用伪量化来模拟实际量化算法的数值。
- 也可以很容易地模拟训练后量化。
保存 QAT 模型:
```
jit.save(jit.script(qat_model), saved_model_dir + scripted_qat_model_file)
```
### 小结
同样可以使用 {func}`~torch.ao.quantization.quantize.quantize` 和 {func}`~torch.ao.quantization.quantize.quantize_qat` 简化流程。
比如,QAT 流程可以这样:
```python
model_path = saved_model_dir + float_model_file
qat_model = create_qat_model(num_classes, model_path)
num_epochs = 30
ylim = [0.8, 1]
device = 'cuda:2'
is_freeze = False
is_quantized_acc = False
need_qconfig = True # 做一些 QAT 的量化配置工作
param_group = True
# 提供位置参数
args = [train_iter,
test_iter,
learning_rate,
num_epochs,
device,
is_freeze,
is_quantized_acc,
need_qconfig,
param_group,
ylim]
quantized_model = quantize_qat(qat_model, CV.train_fine_tuning, args)
```
简而言之,不管是 PTQ 还是 QAT,我们只需要自定义融合模块函数和量化校准函数(比如 QAT 的训练中校准,PTQ 的训练后校准)。
|
github_jupyter
|
| 0.467818 | 0.963814 |
### Adequação de modelos e regressão não linear
regressão com variaveis transformadas
b1_inf = (xy_sum - x_sum*y_sum/n) / (x_sum**2 - (x_sum**2)/n)
b0_inf = (y_sum - b1_inf*x_sum) / n
##### Imports
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
import seaborn as sns
import math
from statsmodels.graphics.gofplots import ProbPlot
from scipy.stats import t
from numpy.core.fromnumeric import mean
```
##### Help functions
```
def df_residual(x, y):
#create a new df for residual analysis
#variable response = y
#variable predict = x
#predict variable response
y_pred=list(modelo.predict())
#sns.resid(modelo)
resi=list(modelo.resid)
#create instance influence
influence = modelo.get_influence()
#get standard residuals
stdresid = list(influence.resid_studentized_internal)
#e/e* verify proportionality between error and standard error
prop=np.divide(resi,stdresid)
#tabela geral
df = pd.DataFrame(list(zip(x, y, y_pred, resi, stdresid, prop)),
columns =['x', 'y', 'yc', 'e', 'e*', 'e/e*'])
return df
```
##### Load dataset
```
df = pd.read_csv('data/carst.csv', delimiter=',')
df
```
##### 1. Faça uma análise de resíduos para carros do tipo 0 (dataset carst_csv)
```
#graph dispersion (cap_vol x consumo)
sns.lmplot(x='cap_vol', y='consumo',hue='Etype', data=df);plt.grid(True)
#select cars type = 0
df1 = df[df['Etype'] == 0]
df1
#dispersion graph
sns.lmplot(x='cap_vol', y='consumo',hue='brand/model/year', data=df1);
#Regression results
#select a response variable
y = df1['consumo']
#select a predict variable
x = df1['cap_vol']
#add a predict constant
x = sm.add_constant(x)
#fit linear regression model
modelo = sm.OLS(y, x).fit()
print(modelo.summary())
#equation of straight line
b=modelo.params
b0=b[0] #intercepto
b1=b[1] #inclinação
print('y= {0}{1} x'.format(b0,b1))
#confidence interval b0 and b1
a = modelo.conf_int(alpha=0.05, cols=None)
a
#create a df for residual analysis
dft = df_residual(df1['cap_vol'], df1['consumo'])
dft
#graph (y x yc)
sns.lmplot(x='y', y='yc', data=dft);plt.grid(True)
#graph (e* x x)
sns.scatterplot(x='x', y='e*', data=dft);plt.grid(True)
plt.xlabel('x= Capacidade Volumétrica')
plt.ylabel('e*= Resíduos padronizados')
plt.axhline(y=0, color='black', linestyle='--', linewidth=1)
plt.show()
#graph (e* x y)
sns.scatterplot(x='y', y='e*', data=dft);plt.grid(True)
plt.xlabel('y= Consumo')
plt.ylabel('e*= Resíduos padronizados')
plt.axhline(y=0, color='black', linestyle='--', linewidth=1)
plt.show()
#graph V(normal distribution)
influence = modelo.get_influence()
QQ = ProbPlot(influence.resid_studentized_internal)
plot_lm_2 = QQ.qqplot(line='45', alpha=0.5, color='#4C72B0', lw=1)
plot_lm_2.axes[0].set_xlabel('Percentil')
plot_lm_2.axes[0].set_ylabel('Resíduos padronizados')
```
#### 2. Os seguintes dados: x=tempo de fritura (s) e y= teor de umidade (%).
##### a. Construa um gráfico de dispersão de y versus x e comente.
```
#data
x = [5, 10, 15, 20, 25, 30, 45, 60]
y= [16.3, 9.7, 8.1, 4.2, 3.4, 2.9, 1.9, 1.3]
#create df
df3 = pd.DataFrame({'x':x, 'y':y})
#dispersion graph (x x y)
plt.scatter(df3['x'], df3['y'])
plt.xlabel('tempo de fritura')
plt.ylabel('teor de umidade')
plt.grid(True)
plt.show()
```
##### b. Construa um gráfico de dispersão dos pares (ln(x), ln(y)) e comente.
```
#linearization
vtx=np.log(df3['x'])
vty=np.log(df3['y'])
#dispersion graph
plt.scatter(vtx, vty)
plt.xlabel('Tempo de fritura (s)');
plt.ylabel('Teor de umidade (%)')
plt.title('Gráfico com variáveis x e y transformadas')
plt.grid(True)
plt.show()
#df comparison
df4 = pd.DataFrame({'x':x, 'y':y, 'ln_x':vtx, 'ln_y':vty})
df4
```
##### c. Qual é a relação probabilística entre x e y sugerido pelo padrão linear no gráfico da parte (b)?
```
#Regression results
#select a response variable
y = vty
#select a predict variable
x = vtx
#add a predict constant
x = sm.add_constant(x)
#fit linear regression model
modelo = sm.OLS(y, x).fit()
print(modelo.summary())
# parâmetros da reta intrinsecamente linear
b=modelo.params
b0=b[0] #intercepto
b1=b[1] #inclinação
print('y= {0}{1} x'.format(b0,b1))
```
##### d. Preveja o valor do teor de umidade ao fritar as batatas por 20s, de uma forma que transmite informações sobre confiabilidade e precisão.
```
#t_student
alpha = 0.05 #nível de significia = 5%
df = len(x) - 2 #gl (n - 2)
t_student = t.ppf(1 - alpha/2, df)
print('t=: {}'.format(t_student))
x=df3['x']
y=df3['y']
vtx=np.log(x)
vty=np.log(y)
y_pred=list(modelo.predict())
#sns.resid(modelo)
resi=(modelo.resid)
#SQE = sum(y - y_inf)
sqe = np.sum(resi*resi)
#s2 = sqe/n-2
gl = modelo.df_resid
s2 = sqe/gl
#s
s = pow(s2, 1/2)
sqe, s2, s
#sy_inf = s * pow(((1/n) + ((vtxs-vtxm)**2)/sxx), 1/2)
#x* = ln(20)
vtxs = np.log(20)
#x mean
vtxm = vtx.mean()
#n
n = modelo.df_resid + 2
#sxx
xx = np.sum(vtx*vtx)
xau = np.sum(vtx)
sxx = xx-xau * xau/n
#sy_inf
sy_inf = s * pow(((1/n)+(vtxs-vtxm)*(vtxs-vtxm)/sxx),1/2)
sy_inf
# the expected value at x = 20 must be evaluated at ln(20) (vtxs)
yln20 = b0 + (b1*vtxs)
yln20
# prediction interval: its standard error combines s^2 with the SE of the fitted mean
sy_ip = np.sqrt((s2) + (sy_inf**2))
# interval half-width
ip = t_student * sy_ip
#ip linearized model
ip_yln20 = ([yln20 - ip, yln20 + ip])
#ip in original model(exponential)
#interval 1
ip_yexp200=pow(math.e,ip_yln20[0])
#interval 2
ip_yexp201=pow(math.e,ip_yln20[1])
ip_yln20, ip_yexp200, ip_yexp201
```
##### e. Analyze the residuals of the linear regression model fit to the transformed data and comment.
```
dft2 = df_residual(vtx, vty)
dft2
#normal distribution
QQ = ProbPlot(influence.resid_studentized_internal)
plot_lm_2 = QQ.qqplot(line='45', alpha=0.5, color='#4C72B0', lw=1)
plot_lm_2.axes[0].set_xlabel('Percentil')
plot_lm_2.axes[0].set_ylabel('Resíduos padronizados')
```
#### 3. The following data: burning mass index (x) and flame length (y)
##### a. Estimate the parameters of the power function model
```
x = [1.7, 2.2, 2.3, 2.6, 2.7, 3.0, 3.2, 3.3, 4.1, 4.3, 4.6, 5.7, 6.1]
y = [1.3, 1.8, 1.6, 2.0, 2.1, 2.2, 3.0, 2.6, 4.1, 3.7, 5.0, 5.8, 5.3]
vtx = np.log(x)
vty = np.log(y)
df5 = pd.DataFrame({'x':x, 'y':y, 'ln_x':vtx, 'ln_y':vty})
#Regression results
#add a predict constant
vtx = sm.add_constant(vtx)
#fit linear regression model
modelo = sm.OLS(vty, vtx).fit()
print(modelo.summary())
#equation of straight line
b=modelo.params
b0=b[0] # intercept
b1=b[1] # slope
print('y= {0}+{1} x'.format(b0,b1))
#original representation
b0 = pow(math.e, b0)
print('y = {}x^{}'.format(b0, b1))
```
##### b. Construct diagnostic plots to check whether a power function is an appropriate model
```
vtx = np.log(x)
vty = np.log(y)
dft5 = df_residual(vtx, vty)
dft5
#graph (y x yc)
sns.lmplot(x='y', y='yc', data=dft5);plt.grid(True)
#graph (e* x x)
sns.scatterplot(x='x', y='e*', data=dft5);plt.grid(True)
plt.xlabel('x= Massa de queima')
plt.ylabel('e*= Resíduos padronizados')
plt.axhline(y=0, color='black', linestyle='--', linewidth=1)
plt.show()
#graph (e* x y)
sns.scatterplot(x='y', y='e*', data=dft5);plt.grid(True)
plt.xlabel('y= Comprimento da chama')
plt.ylabel('e*= Resíduos padronizados')
plt.axhline(y=0, color='black', linestyle='--', linewidth=1)
plt.show()
#graph V(normal distribution)
influence = modelo.get_influence()
QQ = ProbPlot(influence.resid_studentized_internal)
plot_lm_2 = QQ.qqplot(line='45', alpha=0.5, color='#4C72B0', lw=1)
plot_lm_2.axes[0].set_xlabel('Percentil')
plot_lm_2.axes[0].set_ylabel('Resíduos padronizados')
```
#### 4. A sample of 20 observations was submitted to the process of determining the regression line. When the predictor variable takes the value 3.5, the response variable takes the value 15. After obtaining the line, the expected value of the response variable for that value of 3.5 is 14.23. The mean of the predictor variable is 2.2, Sxx = 548, and the estimated standard deviation is s = 0.98. With all of these data, compute the standardized residual for the observation specified in the problem.
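For reference, the standardized residual computed in the cell below is

$$e^{*} = \frac{y - \hat{y}}{s\sqrt{1 - \frac{1}{n} - \frac{(x - \bar{x})^{2}}{S_{xx}}}} = \frac{15 - 14.23}{0.98\sqrt{1 - \frac{1}{20} - \frac{(3.5-2.2)^{2}}{548}}} \approx 0.81.$$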
```
n = 20
x = 3.5
y = 15
y_inf = 14.23
x_mean = 2.2
sxx = 548
s = 0.98
# standardized residual formula, computed by hand (without using a library function)
e_std = (y - y_inf) / (s * pow(1 - (1/n) - ((x - x_mean)**2/sxx), 1/2))
e_std
```
#### 5. A data set was collected and it was seen that it has the characteristic of being represented by a logarithmic model. The data set is shown in the following table. Performing the appropriate variable transformation for the logarithmic model, which transformed data set is the appropriate one?
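As a quick reference for why only x is transformed here: the logarithmic model is already linear in its parameters once x is replaced by ln(x),

$$Y = \beta_0 + \beta_1 \ln(x) + \varepsilon,$$

so the transformed data set keeps y as it is and replaces each x by ln(x), which is exactly what the cell below does.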
```
# transform only x (the logarithmic model is linear in ln(x))
x = [1, 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11, 12]
y = [3.8, 4.5, 4.7, 5.0, 5.1, 5.5, 5.6, 5.9, 6.0, 6.2, 6.5, 6.6]
vtx = np.log(x)
df = pd.DataFrame({'y':y, 'vtx':vtx})
df
```
#### 6. The article "Effect of crystal orientation on fatigue failure of single crystal nickel base turbine blade superalloys" (J. of Engineering for Gas Turbines and Power, 2002: 161-176) presented the data below and fitted a nonlinear regression model in order to predict the stress amplitude of cycles in aircraft engines until a failure occurs.
```
x = (1326,1593,4414,5673,29516,26,843,1016,3410,7101,7356,7904,79,4175,34676,114789,2672,7532,30220)
y = (0.01495,0.01470,0.01100,0.01190,0.00873,0.01819,0.00810,0.00801,0.00600,0.00575,0.00576,0.00580,0.01212,0.00782,0.00596,0.00600,0.00880,0.00883,0.00676)
vtx = np.log(x)
vty = np.log(y)
# build the DataFrame and name the columns
df = pd.DataFrame(list(zip(vtx, y)), columns =["ln_x","y"])
vtx = df['ln_x']
y = df['y']
# linear regression with the transformed variable
vtx = sm.add_constant(vtx)
modelo = sm.OLS(y, vtx ).fit() # note: this fitted model is also used for the CI
# line parameters
b=modelo.params
b0=b[0] # intercept
b1=b[1] # slope
print("Intrinsically linear function: y = {0} + ({1}) ln(x)".format(b0,b1))
# RESULT
y = b0 + (b1 * np.log(2710))
print('predicted value for x = 2710: y = {}'.format(y))
```
<a href="https://colab.research.google.com/github/cagdastopcu/omission-of-visual-stimuli/blob/main/function_examples_for_data_analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Pre-analysis setup
Setup process for importing Allen SDK materials and data:
A lot of this code is taken directly from https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/projects/neurons/load_Allen_Visual_Behavior_from_SDK.ipynb
Upgrade pip so we can install Allen's mindscope utilities package.
```
!python -m pip install --upgrade pip
!pip install mindscope_utilities --upgrade
```
This notebook always needs to have the runtime restarted. There should be a button above this line.
Import dependencies: the usual scientific computing stuff, seaborn for plotting, mindscope utilities from Allen, and AllenSDK's visual behavior packages.
```
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import seaborn as sns
import matplotlib.pyplot as plt
# Here are the Allen packages:
import mindscope_utilities
import mindscope_utilities.visual_behavior_ophys as ophys
from allensdk.brain_observatory.behavior.behavior_project_cache import VisualBehaviorOphysProjectCache
```
This dataset needs to be downloaded somewhere; the default Allen code just uses a local temporary directory.
```
data_storage_directory = "/temp" # Note: this path must exist on your local drive
cache = VisualBehaviorOphysProjectCache.from_s3_cache(cache_dir=data_storage_directory)
# Setup the tables for the sessions and experiments:
session_table = cache.get_ophys_session_table()
experiment_table = cache.get_ophys_experiment_table()
```
# Looking through experiment directories to find useful data
Let's take a look at what the session table looks like. I think this is like a meta index of all the different sessions with a bunch of metadata about them.
```
session_table.head()
```
There are a ton of different properties. We want to look at different cell lines because they relate to the cell types we're interested in. Let's get the unique tags for "cre_line", just because the text output for that tag is pretty easy to read.
```
drivers = session_table["cre_line"].unique()
print(drivers)
```
Great, now we can get the indices of these cell types from the session table. Let's make a function I guess.
```
def get_cell_type_index(cell_type,session_table):
"""Get the indexes of the Allen sessions that use a particular cre line.
Args:
        cell_type: a string with the name of the cre line you want. This uses a case-insensitive regex, so a partial name like 'sst' is good enough for Sst-IRES-cre.
session_table: The table containing the sessions from Allen.
Returns:
ix (1D array): An index of sessions that use that cre line.
"""
    # Keep only the rows whose cre_line contains the cell_type string, then return their index.
    ix = session_table[session_table.cre_line.str.contains(cell_type, case=False, regex=True)].index
return ix
test_ix = get_cell_type_index('slc',session_table)
print(test_ix)
```
# Working with experiment ids
```
ophys_session_id = test_ix[5]
session_table.loc[ophys_session_id]
experiments = {}
ophys_experiment_ids = session_table.loc[ophys_session_id]['ophys_experiment_id']
for ophys_experiment_id in ophys_experiment_ids:
experiments[ophys_experiment_id] = cache.get_behavior_ophys_experiment(ophys_experiment_id)
neural_data = []
for ophys_experiment_id in tqdm(experiments.keys()): #tqdm is a package that shows progress bars for items that are iterated over
this_experiment = experiments[ophys_experiment_id]
this_experiment_neural_data = ophys.build_tidy_cell_df(this_experiment)
# add some columns with metadata for the experiment
metadata_keys = [
'ophys_experiment_id',
'ophys_session_id',
'targeted_structure',
'imaging_depth',
'equipment_name',
'cre_line',
'mouse_id',
'sex',
]
for metadata_key in metadata_keys:
this_experiment_neural_data[metadata_key] = this_experiment.metadata[metadata_key]
# append the data for this experiment to a list
neural_data.append(this_experiment_neural_data)
# concatenate the list of dataframes into a single dataframe
neural_data = pd.concat(neural_data)
mindscope_utilities.event_triggered_response?
cell_ids = neural_data['cell_specimen_id'].unique()
single_cell_timeseries = neural_data.query('cell_specimen_id == 1086557208')
stimulus_table = experiments[ophys_experiment_ids[0]].stimulus_presentations.drop(columns = ['image_set']) # dropping the 'image_set' column to avoid confusion. Image_set column contains a unique string for set of images presented in a session.
cell_id = cell_ids[11]
etr = mindscope_utilities.event_triggered_response(
data = neural_data.query('cell_specimen_id == @cell_id'),
t = 'timestamps',
y = 'dff',
event_times = stimulus_table.query('omitted')['start_time'],
t_before=3,
t_after=3,
output_sampling_rate = 50,
)
etr
sns.lineplot(
data=etr,
x='time',
y='dff',
n_boot=500
)
```
Let's functionalize this, since we want these plots.
```
def get_response(event_type, neural_data, c_id = 1, t_span = 3, plot = True):
"""Get the activity of a particular cell in response to an event
Args:
        event_type: a boolean column of the stimulus table to filter on, e.g. 'omitted' or 'is_change'
neural_data: the neural data table.
c_id: cell id, which cell to get the response from.
t_span: what timespan to sample, in seconds.
plot: if true, a plot is generated.
Returns:
        etr (DataFrame): The event-triggered cell activity from -t_span s to +t_span s
"""
cell_ids = neural_data['cell_specimen_id'].unique()
cell_id = cell_ids[c_id]
etr = mindscope_utilities.event_triggered_response(
data = neural_data.query('cell_specimen_id == @cell_id'),
t = 'timestamps',
y = 'dff',
event_times = stimulus_table.query(event_type)['start_time'],
t_before=t_span,
t_after=t_span,
output_sampling_rate = 50,
)
etr
if plot:
sns.lineplot(
data=etr,
x='time',
y='dff',
n_boot=500
)
return etr
get_response('is_change',neural_data,11,1,True)
get_response('omitted',neural_data,11,2,plot=True)
cell_ids = neural_data['cell_specimen_id'].unique()
for c in range(len(cell_ids)):
get_response('omitted',neural_data,c,.5,plot=True)
```
# Plotting means from cell types
```
excitatory_cell_examples = get_cell_type_index('slc',session_table)
vip_cell_examples = get_cell_type_index('vip',session_table)
sst_cell_examples = get_cell_type_index('sst',session_table)
excitatory_cell_examples = excitatory_cell_examples[1]
vip_cell_examples = vip_cell_examples[1]
sst_cell_examples = sst_cell_examples[1]
exc_1 = session_table.loc[excitatory_cell_examples]
vip_1 = session_table.loc[vip_cell_examples]
sst_1 = session_table.loc[sst_cell_examples]
experiments = {}
ophys_experiment_ids = session_table.loc[excitatory_cell_examples]['ophys_experiment_id']
for ophys_experiment_id in ophys_experiment_ids:
experiments[ophys_experiment_id] = cache.get_behavior_ophys_experiment(ophys_experiment_id)
neural_data = []
for ophys_experiment_id in tqdm(experiments.keys()): #tqdm is a package that shows progress bars for items that are iterated over
this_experiment = experiments[ophys_experiment_id]
this_experiment_neural_data = ophys.build_tidy_cell_df(this_experiment)
# add some columns with metadata for the experiment
metadata_keys = [
'ophys_experiment_id',
'ophys_session_id',
'targeted_structure',
'imaging_depth',
'equipment_name',
'cre_line',
'mouse_id',
'sex',
]
for metadata_key in metadata_keys:
this_experiment_neural_data[metadata_key] = this_experiment.metadata[metadata_key]
# append the data for this experiment to a list
neural_data.append(this_experiment_neural_data)
# concatenate the list of dataframes into a single dataframe
neural_data = pd.concat(neural_data)
cell_ids = neural_data['cell_specimen_id'].unique()
sample_data = get_response('omitted', neural_data, 0, 1, plot=False)  # any cell works here; we only need the length of the time base
responses = np.zeros((len(sample_data),len(cell_ids)),dtype=float)
time = np.zeros((len(sample_data),len(cell_ids)),dtype=float)
for c in range(len(cell_ids)):
dummy = get_response('omitted',neural_data,c,1,plot=False)
responses[:,c] = dummy.dff
time[:,c] = dummy.time
sns.lineplot(
x=time.mean(axis=1),
y=responses.mean(axis=1)
)
```
# This data contains the results of an A/B test. <a href='https://tf-assets-prod.s3.amazonaws.com/tf-curric/data-science/ab_edited.csv'>Download it here.</a>
## For this analysis:
### <ol><li>Check for adequate sample sizes.</li><li>Check for changes over time in results.</li><li>Formally test for a significant difference in conversions between treatment and control groups.</li></ol>
```
import scipy.stats as st
import pandas as pd
import matplotlib.pyplot as plt
rawdata = pd.read_csv('https://tf-assets-prod.s3.amazonaws.com/tf-curric/data-science/ab_edited.csv')
display(rawdata.head(),
rawdata.info())
rawdata['datestamp'] = pd.DatetimeIndex(rawdata['timestamp']).date
display(rawdata.head(),
rawdata['datestamp'].min(),
rawdata['datestamp'].max()
)
rawdata['daysstamp'] = pd.DatetimeIndex(rawdata['timestamp']).day
display(rawdata.head())
```
#### <span style="color:blue">1. From this data, we can see there are over 290,000 records. In addition, we're able to see that the entirety of this data occurs in 2017-01, so we can filter the timestamp down to just the days for a cleaner final presentation.</span>
```
datacon = rawdata[rawdata['group'] == 'control']
datatre = rawdata[rawdata['group'] == 'treatment']
display(datacon.head(),
datatre.head())
groupeddatacon = datacon.groupby('daysstamp')['converted'].apply(lambda x: (x==1).sum()).reset_index(name='count')
groupeddatatre = datatre.groupby('daysstamp')['converted'].apply(lambda x: (x==1).sum()).reset_index(name='count')
plt.bar(groupeddatacon['daysstamp'], groupeddatacon['count'], alpha=0.4, label='Control Group')
plt.bar(groupeddatatre['daysstamp'], groupeddatatre['count'], alpha=0.4, label='Treatment Group')
plt.xlabel('Days\nJanuary, 2017')
plt.ylabel('Converted / day')
plt.legend(loc='lower left')
plt.show()
groupeddatacon = datacon.groupby('daysstamp')['converted'].apply(lambda x: (x==1).sum()/x.count()).reset_index(name='ratio')
groupeddatatre = datatre.groupby('daysstamp')['converted'].apply(lambda x: (x==1).sum()/x.count()).reset_index(name='ratio')
plt.bar(groupeddatacon['daysstamp'], groupeddatacon['ratio'], alpha=0.4, label='Control Group')
plt.bar(groupeddatatre['daysstamp'], groupeddatatre['ratio'], alpha=0.4, label='Treatment Group')
plt.xlabel('Days\nJanuary, 2017')
plt.ylabel('(Converted / Sum) / day')
plt.legend(loc='lower left')
plt.show()
```
#### <span style="color:blue">2. From this data, we can see there doesn't seem to be any major fluctuation day-by-day for either the control group or the treatment group.</span>
```
elist = pd.DatetimeIndex(datacon['datestamp'].unique()).sort_values(ascending=True)
dlist = elist.strftime('%m/%d/%Y')
plist = []
tlist = []
for today in elist:
todaydatacon = datacon[datacon['datestamp'] == today]
todaydatatre = datatre[datatre['datestamp'] == today]
t, p = st.ttest_ind(todaydatacon['converted'], todaydatatre['converted'])
plist.append(p)
tlist.append(t)
resultsdf = pd.DataFrame(list(zip(dlist, tlist, plist)), columns=['Date', 'T-test Stat', 'T-test P'])
resultsdf['P < 0.05'] = resultsdf['T-test P'] < 0.05
resultsdf
```
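The table above tests each day separately. For item 3 of the analysis, a single formal test on the full control and treatment groups is more direct; here is a minimal sketch using the same `scipy.stats` t-test already imported as `st` and the `datacon`/`datatre` frames defined above:
```
# Pooled comparison over the whole period: one two-sample t-test on all conversions
t_all, p_all = st.ttest_ind(datacon['converted'], datatre['converted'])
print('Overall t = {:.3f}, p = {:.3f}'.format(t_all, p_all))
# A p-value above 0.05 means we fail to reject the null hypothesis
# that the two groups convert at the same rate.
```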
#### <span style='color:blue'>Of the 23 days for which we have results, there was only one instance where P was less than 0.05. However, the overwhelming trend throughout the month was the opposite. Therefore we fail to reject the null hypothesis that there is no significant difference in the means.</span>
# Processes and Threads
The computers we use today have entered the era of multiple CPUs and multiple cores, and our operating systems support multitasking.
Process: a program being executed by the operating system. The OS allocates memory on a per-process basis, and every process has its own address space, data stack, and other auxiliary data used to track its execution. A process can also own multiple concurrent threads of execution; simply put, it can hold several execution units that can each be scheduled on the CPU.
Thread: an execution unit. At any given moment only a single thread can hold the CPU, so multiple threads share the CPU's execution time.
Example: in a parking lot, two cars taking turns to share a single parking spot is like concurrency, while two cars parked in two separate spots at the same time is like parallelism.
```
from random import randint
from time import time, sleep
def download_task(filename):
print('开始下载%s...' % filename)
time_to_download = randint(5, 10)
sleep(time_to_download)
print('%s下载完成! 耗费了%d秒' % (filename, time_to_download))
def main():
start = time()
download_task('Python从入门到住院.pdf')
download_task('Peking Hot.avi')
end = time()
print('总共耗费了%.2f秒.' % (end - start))
if __name__ == '__main__':
main()
from multiprocessing import Process
from os import getpid
from random import randint
from time import time, sleep
def download_task(filename):
print('启动下载进程,进程号[%d].' % getpid())
print('开始下载%s...' % filename)
time_to_download = randint(5, 10)
sleep(time_to_download)
print('%s下载完成! 耗费了%d秒' % (filename, time_to_download))
def main():
start = time()
p1 = Process(target=download_task, args=('Python从入门到住院.pdf', ))
p1.start()
p2 = Process(target=download_task, args=('Peking Hot.avi', ))
p2.start()
p1.join()
p2.join()
end = time()
print('总共耗费了%.2f秒.' % (end - start))
if __name__ == '__main__':
main()
```
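The explanation above covers threads as well as processes, but the examples only use `multiprocessing`. As a minimal sketch (not part of the original notebook), the same download demo can be written with the standard `threading` module, where the two threads share the process's memory instead of starting new processes:
```
from random import randint
from threading import Thread
from time import time, sleep


def download_task(filename):
    print('开始下载%s...' % filename)
    time_to_download = randint(5, 10)
    sleep(time_to_download)
    print('%s下载完成! 耗费了%d秒' % (filename, time_to_download))


def main():
    start = time()
    # start two threads that run download_task concurrently
    t1 = Thread(target=download_task, args=('Python从入门到住院.pdf',))
    t1.start()
    t2 = Thread(target=download_task, args=('Peking Hot.avi',))
    t2.start()
    # wait for both threads to finish before measuring the elapsed time
    t1.join()
    t2.join()
    end = time()
    print('总共耗费了%.2f秒.' % (end - start))


if __name__ == '__main__':
    main()
```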
# Decorators
```
def deco(func):
def wrap(*args,**kwargs):
return func(*args,**kwargs)
return wrap
@deco
def foo(a,b):
return a ** b
def deco(n):
def wrap1(func):
def wrap2(*args, **kwargs):
return func(*args, **kwargs)
return wrap2
return wrap1
# Call process: manually expanding what @deco(30) would do
wrap1 = deco(30)
wrap2 = wrap1(foo)
foo = wrap2
foo(4, 8)
# Single-expression form
deco(30)(foo)(4, 8)
import random
def deco2(parms):
    def deco(func):
        def wrap(*args, **kwargs):
            if parms:
                kwargs['n1'] = -1  # the decorator argument controls whether n1 is overridden
            return func(*args, **kwargs)
        return wrap
    return deco
@deco2(True)
def A(n1, n2):
    if n1 == n2:
        print('验证密码正确')
    else:
        print('验证码错误')
num = random.randrange(1000, 9999)
print('验证码是:%d' % num)
num2 = int(input('>>'))
A(n1=num, n2=num2)
class Rectangle(object):
def __init__(self):
pass
def main(self):
width = float(input("长为:"))
heightd = float(input("宽为:"))
self.getArea(width,heightd)
self.getPerimeter(width,heightd)
def getArea(self,width,heightd):
Area = width * heightd
print('面积为:%.2f'%Area)
def getPerimeter(self,width,heightd):
Perimeter = (width + heightd) * 2
print('周长为:%.2f'%Perimeter)
if __name__ == '__main__':
rectangle = Rectangle()
rectangle.main()
class Fan(object):
def __init__(self):
pass
def main(self):
speed = int(input("1档,2档,3档:"))
on = bool()
radius1 = float(5)
radius2 = float(10)
color1 = str('blue')
color2 = str('yellow')
self.gongneng(speed,on,radius1,radius2,color1,color2)
def gongneng (self,speed,on,radius1,radius2,color1,color2):
if speed == 1:
print(on)
elif speed == 2:
print('半径为:',radius1,'颜色:',color1)
elif speed == 3:
print('半径为:',radius2,'颜色:',color2)
else:
pass
if __name__ == "__main__":
fan =Fan()
fan.main()
```

<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Science/SourcesOfEnergy/resources-and-recycling.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
```
import myMagics
%uiButtons
```
# Sources of Energy
## Learning Summary
Students will begin to understand the importance of managing waste properly. They will learn about the different sources of energy. In addition, they will learn that these energy sources are not unlimited and can run out. By the end of this notebook, students will understand which energy sources are renewable and non-renewable. They will learn and begin to think about the good and bad traits of energy sources as well as the lifespan of the source. Students will be prompted to think about the efficiency of the energy source after taking all factors into consideration.
## Solar Energy
Solar energy is energy that comes from the sun. The earth receives more energy from the sun in one hour than the whole world uses in one year. Solar energy is used to generate electricity using solar panels. A solar panel works by allowing particles of light to knock electrons free from atoms, generating a flow of electricity. Unfortunately, solar panels need solar cells to work, and these cells are made from existing materials such as silicon, which is not an unlimited resource.
Solar panels are hard to manufacture, which keeps solar energy use low. Once they are made, solar panels do not release any pollution into the air. Energy from the sun arrives every day and will never run out, making it a renewable resource.
<img src="http://www.mountainjunkiegear.com/wp-content/uploads/2013/04/How-Solar-Panels-Work.jpg" style="margin: 0 auto; width: 1000px;">
#### Source Image: Solar Panels for your Home, Nov. 2015. Retrieved from http://www.mountainjunkiegear.com/wp-content/uploads/2013/04/How-Solar-Panels-Work
## Oil Energy and Natural Gas Energy
The most used source of energy around the world is oil. Oil occurs naturally and takes over a million years to form from old plants and bacteria. Oil comes from far underground. It is brought to the surface by special pumps and pipes. Most electricity, fuel and everyday things such as plastic come from this source. The most common use of oil that you may know is gasoline for vehicles. Oil is a fossil fuel. A fossil fuel is a source of energy that does not reproduce at the rate it is being used, making it a non-renewable resource.
```
from IPython.display import YouTubeVideo
YouTubeVideo('WW8KfUJdTNY', width=800, height=300)
```
##### Source Video: Crude Oil Extraction, July 2016. Retrieved from https://www.youtube.com/watch?time_continue=83&v=WW8KfUJdTNY
Similarly, natural gas is found underground all over the world. It is often located around coal or oil pockets in the earth. The word 'natural' means that it is a gas formed by natural chemical processes that occur on earth. Natural gas is a by-product of decomposing biomass such as trees, grass, animals, wood and leaves.
Your house uses natural gas every day for heating, cooking and electricity. How does natural gas get from deep within the ground all the way to your house to be used? Natural gas companies drill thousands of feet into the earth and use big pumps to bring it to the surface. Then they send the gas to your town through gas pipes buried underground. A gas company brings it to your house in smaller pipes. Each household in Alberta pays a natural gas bill each month. Since it is produced from the same processes as oil, natural gas is a fossil fuel too making it a non-renewable source.
## Coal Energy
Another major source of energy around the world is coal. Most of the coal we use now was formed 300 million years ago. The energy in coal comes from energy that was stored in giant plants that lived hundreds of millions of years ago in swamp forests, even before the dinosaurs!
<img src="http://www.dynamicscience.com.au/tester/solutions1/electric/electricenergy.gif" style="margin: 0 auto; width: 1000px;">
#### Source Image: Energy Conservation, n.d. Retrieved from http://www.dynamicscience.com.au/tester/solutions1/electric/electricenergy
In the above diagram we see that plants are the only organisms on Earth that can harness solar energy and convert it into a more usable form called chemical energy. Chemical energy is trapped in the form of wood and other plant matter. Coal is also a source of chemical energy because when the plant matter died, it formed layers at the bottom of swamps. Water and dirt began to pile up on top of the dead plant remains and years of pressure formed the rock we call coal.
Humans dig up coal and burn it. When coal is burned, its stored chemical energy is converted into heat energy. The heat energy is used to heat water into super-hot steam. Steam occurs when water heats up to such a high temperature that it changes from a liquid to a gas. Steam carries kinetic energy. Kinetic energy is the energy of motion, or in other words the movement of particles. The steam (kinetic energy) spins a turbine, which drives a generator and produces electrical energy. Coal is a fossil fuel, making it non-renewable: humans use coal at a faster rate than it is replaced.
The law of conservation of energy states that energy cannot be created nor destroyed. It can, however, change forms. Take the process outlined in the animation below: at every step some of the energy is lost (mostly as heat), and the efficiency of each step is given as an estimated percentage. A small worked example of how these step efficiencies combine follows the animation.
<img src="http://www.dynamicscience.com.au/tester/solutions1/electric/powerstation/Untitled-17.gif" style="margin: 0 auto; width: 1000px;">
#### Source Image: Energy Conservation, n.d. Retrieved from http://www.dynamicscience.com.au/tester/solutions1/electric/electricenergy
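To make the arithmetic concrete, here is a minimal Python sketch of how the step efficiencies multiply together; the percentages below are placeholder values chosen for illustration, not the exact numbers from the animation:
```
# Hypothetical per-step efficiencies for the coal-to-electricity chain (placeholder values)
step_efficiencies = {
    'chemical -> heat (burning coal)': 0.85,
    'heat -> kinetic (boiling water into steam)': 0.45,
    'kinetic -> mechanical (spinning the turbine)': 0.95,
    'mechanical -> electrical (generator)': 0.95,
}

overall = 1.0
for step, eff in step_efficiencies.items():
    overall *= eff  # each step keeps only a fraction of the energy; the rest is lost as heat
    print('{}: {:.0%} efficient'.format(step, eff))

print('Overall: about {:.0%} of the chemical energy in coal becomes electricity'.format(overall))
```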
```
%%html
<style>
#list {
margin: 20px 0;
padding: 0;
}
#list li {
list-style: none;
margin: 5px 0;
}
.energy {
font-family: 'Courier New', Courier, monospace;
font-size: 15px;
}
.answerSelect {
margin: 10px 10px;
}
.correct {
color: green;
font-size: 25px;
display: none;
}
.wrong {
color: red;
font-size: 25px;
display: none;
}
.ansBtn {
cursor: pointer;
border: solid black 1px;
background: #d3d3d3;
padding: 10px 5px;
border-radius: 0px;
font-family: arial;
font-size: 20px;
}
.ansBtn:hover {
background: #f3f3f3;
}
.redtext {
color: red;
}
.greentext {
color: green;
}
</style>
<body>
<div style="height: 300px">
<ul id="list">
<li>
<label for="q1">1) What process captures solar energy?</label>
<select name="q1" id="q1" class="answerSelect">
<option value="default" selected>Select an Answer</option>
<option value="evaporation">Evaporation</option>
<option value="photosynthesis">Photosynthesis</option>
<option value="respiration">Respiration</option>
<option value="condensation">Condensation</option>
</select>
<span class="correct" id="Q1C">✓</span>
<span class="wrong" id="Q1W">✗</span>
</li>
<li>
<label for="q2">2) Which is the most inefficient energy conversion step in the process outlined above?</label>
<select name="q2" id="q2" class="answerSelect">
<option value="default" selected>Select an Answer</option>
<option value="kinetic into mechanical">Kinetic into mechanical</option>
<option value="chemical into heat">Chemical into heat</option>
<option value="mechanical into electrical">Mechanical into electrical</option>
<option value="solar into chemical">Solar into chemical</option>
</select>
<span class="correct" id="Q2C">✓</span>
<span class="wrong" id="Q2W">✗</span>
</li>
<li>
<label for="q3">3) The more steps in the process of generating electrical energy the</label>
<select name="q3" id="q3" class="answerSelect">
<option value="default" selected>Select an Answer</option>
<option value="less electrical energy that is generated">Less electrical energy that is generated</option>
<option value="the more electrical energy that is generated">The more electrical energy that is generated</option>
</select>
<span class="correct" id="Q3C">✓</span>
<span class="wrong" id="Q3W">✗</span>
</li>
<li>
<label for="q4">4) The energy lost is in the form of</label>
<select name="q4" id="q4" class="answerSelect">
<option value="default" selected>Select an Answer</option>
<option value="Electrical">Electrical</option>
<option value="Heat">Heat</option>
<option value="Chemical">Chemical</option>
<option value="Mechanical">Mechanical</option>
</select>
<span class="correct" id="Q4C">✓</span>
<span class="wrong" id="Q4W">✗</span>
</li>
<li>
<label for="q5">5) What type of energy is carried by steam</label>
<select name="q5" id="q5" class="answerSelect">
<option value="default" selected>Select an Answer</option>
<option value="Electrical">Electrical</option>
<option value="Chemical">Chemical</option>
<option value="Mechanical">Mechanical</option>
<option value="Kinetic">Kinetic</option>
</select>
<span class="correct" id="Q5C">✓</span>
<span class="wrong" id="Q5W">✗</span>
</li>
</ul>
<span class="ansBtn" id="ansBtn" onclick="checkAns()">Check Answers!</span>
</div>
<script src="main.js"></script>
</body>
```
## Biomass Energy
Perhaps one of the oldest forms of fuel known is biomass fuel. Biomass is any kind of biological matter that humans can burn in order to produce heat or energy. Biomass mainly consists of wood, leaves, and grass.
All biomass stores carbon, and when biomass is burned the carbon is released into the atmosphere as CO2 gas. Resources such as wood, leaves and grass are NOT fossil fuels because biomass is said to be carbon neutral. Carbon neutral means that the amount of carbon released when biomass is burned is balanced by the amount of carbon taken back up by the environment through processes like photosynthesis.
As humans we must remember that trees take years to grow so if we carelessly use wood, it may not always be a resource immediately available for use. When someone in Alberta cuts down a tree to burn it for energy, it is said that one tree must be planted to replace it. Humans across Canada work to replace trees at the rate in which we use them making biomass a renewable source.
##### Biomass Fun Facts!
1) If you’ve ever been near a campfire or a fireplace, you’ve witnessed biomass energy through the burning of wood.
2) Biomass has been around since the beginning of time when man burned wood for heating and cooking.
3) Wood was the biggest energy provider in the world in the 1800’s.
4) Garbage can be burned to generate energy as well. This not only makes use of trash for energy, but reduces the amount of trash that goes into landfills. This process is called Waste-to-Energy.
## Wind Energy
Wind is a newer source of energy. The use of wind for energy production, mainly electricity, has only been developed recently. Most wind power is converted to electricity by using giant machines called wind turbines. Wind is a natural resource that will never run out making wind a renewable resource. Wind that occurs naturally moves the turbines. The turbines power a generator. A generator is a device that converts mechanical energy to electrical energy. In this case the mechanical energy is the movement of the turbines created by the wind. This mechanical energy is changed into electrical energy that can be used in a home.
Did you know that Alberta has a wind energy capacity of 1,483 megawatts? Alberta's wind farms produce enough electricity each year to power 625,000 homes, which is 8 percent of Alberta's electricity demand.
<img src="https://i.gifer.com/so8.gif" style="margin: 0 auto; width: 1000px;">
#### Source Image: #Wind, n.d. Retrieved from https://gifer.com/en/so8
## Water Energy
The correct term for water energy is hydropower. Hydro means water. The first use of water for energy dates back to around 4000 B.C.; water wheels were used during Roman times to water crops and supply drinking water to villages. Now, water creates energy in hydro-dams. A hydro-dam produces electricity when water pushes a device called a turbine. This turbine spins a generator, which converts mechanical energy into electrical energy. In this case the mechanical energy is the water pushing the turbine. Water is considered a resource that will never run out. It is plentiful and is replenished every time it rains.
<img src="http://www.wvic.com/images/stories/Hydroplants/hydroplant-animate.gif" style="margin: 0 auto; width: 1000px;">
#### Source Image: Wisconsin Valley Improvement Company, n.d. Retrieved from http://www.wvic.com/content/how_hydropower_works.cfm
## Nuclear Energy
Nuclear energy uses the power of the atom to create steam power. Atoms can release energy in two different ways: nuclear fission, in which the nucleus of an atom is split, or nuclear fusion, in which the nuclei of two atoms are joined together.
The energy produced by the Sun, for example, comes from nuclear fusion reactions. Hydrogen gas in the core of the Sun is squeezed together so tightly that four hydrogen particles combine to form one helium atom. This is called nuclear fusion. When one of these two physical reactions occurs (nuclear fission or nuclear fusion) the atoms experience a slight loss of mass. The mass that is lost becomes a large amount of heat energy and light. This is why the sun is so hot and shines brightly. Did you know that Albert Einstein discovered his famous equation, E = mc², with the sun and stars in mind? With his equation, Einstein showed that "Energy equals mass times the speed of light squared."
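As a small worked example of what the equation says (using the rounded value c ≈ 3 × 10⁸ m/s), converting just one kilogram of mass entirely into energy would release

$$E = mc^2 = (1\ \text{kg}) \times (3 \times 10^{8}\ \text{m/s})^2 = 9 \times 10^{16}\ \text{J},$$

an enormous amount of energy from a very small amount of mass.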
The heat generated in nuclear fusion and nuclear fission is used to heat up water and produce steam, which is then used to create electricity. The separation and joining of atoms occurs safely within the walls of a nuclear power station. Nuclear power generates nuclear waste that can be dangerous to human health and the environment.
```
from IPython.display import YouTubeVideo
YouTubeVideo('igf96TS3Els', width=800, height=300)
```
#### Source Video: Nuclear Power Station, July 2008. Retrieved from https://www.youtube.com/watch?v=igf96TS3Els
## Geothermal Energy
Geothermal energy is generated by the high temperature of earth's core heating water into steam. The term 'geo' means earth and the word 'thermal' means heat, which means geothermal is 'heat from the earth.' Geothermal power plants convert large amounts of steam (kinetic energy) into usable electricity. They are built in prime areas where the earth's heat is close to the surface. Canada does not have any commercial geothermal energy plants.
Note, there exists a device called a geothermal heat pump that can tap into geothermal energy to heat and cool buildings. A geothermal heat pump system consists of a heat pump, an air delivery system (ductwork), and a heat exchanger-a system of pipes buried in the shallow ground near the building. In the summer, the heat pump moves heat from the indoor air into the heat exchanger. In the winter, the heat pump removes heat from the heat exchanger and pumps it into the indoor air delivery system.
Why is geothermal energy a renewable resource? Because the source of geothermal energy is the unlimited amount of heat generated by the Earth's core. It is important to recognize geothermal energy systems DO NOT get their heat directly from the core. Instead, they pull heat from the crust—the rocky upper 20 miles of the planet's surface.
```
from IPython.display import YouTubeVideo
YouTubeVideo('y_ZGBhy48YI', width=800, height=300)
```
#### Source Video: Energy 101: Geothermal Heat Pumps, Jan. 2011.
## Renewable Energy Sources vs. Non-renewable
Renewable energy sources are energy sources that can be replaced at the same rate they are used. The source is plentiful and generally quite efficient. An example of a renewable energy source is wind. Wind is a renewable energy source because there is a limitless supply that is naturally produced.
Non-renewable energy sources are those that run out more quickly than they are naturally reproduced. Usually these energy sources take millions of year to produce and they have a bigger negative impact on the earth compared to alternate sources. An example of a non-renewable energy source is oil. Oil is non-renewable because humans are using it faster than it is being replaced naturally on earth.
In order to get comfortable with the two types of Energy Sources, try identifying the renewable energy sources from the non-renewable in the activity below.
```
%%html
<!-- Question 1 -->
<div>
<!-- Is Solar Energy a Renewable Energy Source? <p> tag below -->
<p>Is Solar Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<!-- Each <li> tag is a list item to represent a possible Answer
q1c1 stands for "question 1 choice 1", question 2 choice 3 would be
q2c3 for example. You can change this convention if you want its just
what I chose. Make sure all answers for a question have the same
name attribute, in this case q1. This makes it so only a single
radio button can be selected at one time-->
<input type="radio" name="q1" id="q1c1" value="right">
<label for="q1c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q1" id="q1c2" value="wrong">
<label for="q1c2">No, it is a Non-Renewable Energy Source.</label>
</li>
</ul>
<!-- Give a unique id for the button, i chose q1Btn. Question 2 I
I would choose q2Btn and so on. This is used to tell the script
which question we are interested in. -->
<button id="q1Btn">Submit</button>
<!-- this is where the user will get feedback once answering the question,
the text that will go in here will be generated inside the script -->
<p id="q1AnswerStatus"></p>
</div>
<!-- Question 2 -->
<div>
<p>Is Oil Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q2" id="q2c1" value="wrong">
<label for="q2c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q2" id="q2c2" value="right">
<label for="q2c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q2Btn">Submit</button>
<p id="q2AnswerStatus"></p>
</div>
<!-- Question 3 -->
<div>
<p>Is Natural Gas a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q3" id="q3c1" value="wrong">
<label for="q3c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q3" id="q3c2" value="right">
<label for="q3c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q3Btn">Submit</button>
<p id="q3AnswerStatus"></p>
</div>
<!-- Question 4 -->
<div>
<p>Is Coal Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q4" id="q4c1" value="wrong">
<label for="q4c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q4" id="q4c2" value="right">
<label for="q4c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q4Btn">Submit</button>
<p id="q4AnswerStatus"></p>
</div>
<!-- Question 5 -->
<div>
<p>Is Biomass Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q5" id="q5c1" value="right">
<label for="q5c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q5" id="q5c2" value="wrong">
<label for="q5c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q5Btn">Submit</button>
<p id="q5AnswerStatus"></p>
</div>
<!-- Question 6 -->
<div>
<p>Is Wind Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q6" id="q6c1" value="right">
<label for="q6c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q6" id="q6c2" value="wrong">
<label for="q6c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q6Btn">Submit</button>
<p id="q6AnswerStatus"></p>
</div>
<!-- Question 7 -->
<div>
<p>Is Water Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q7" id="q7c1" value="right">
<label for="q7c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q7" id="q7c2" value="wrong">
<label for="q7c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q7Btn">Submit</button>
<p id="q7AnswerStatus"></p>
</div>
<!-- Question 8 -->
<div>
<p>Is Nuclear Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q8" id="q8c1" value="wrong">
<label for="q8c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q8" id="q8c2" value="right">
<label for="q8c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q8Btn">Submit</button>
<p id="q8AnswerStatus"></p>
</div>
<!-- Question 9 -->
<div>
<p>Is Geothermal Energy a Renewable Energy Source?</p>
<ul style="list-style-type: none">
<li>
<input type="radio" name="q9" id="q9c1" value="right">
<label for="q9c1">Yes, it is Renewable.</label>
</li>
<li>
<input type="radio" name="q9" id="q9c2" value="wrong">
<label for="q9c2">No, it is a Non-Renewable Energy Source. </label>
</li>
</ul>
<button id="q9Btn">Submit</button>
<p id="q9AnswerStatus"></p>
</div>
<script>
// Question 1
// This looks at which question is being checked; pass in the button's id
document.getElementById("q1Btn").onclick = function () {
// This if statement checks the correct answer; in this case choice 1 is correct
if (document.getElementById("q1c1").checked) {
// "Correct Answer" field is where you can add any text to be displayed when it is correct
document.getElementById("q1AnswerStatus").innerHTML = "Correct Answer!";
} else {
// "Wrong Answer" field is where you can add any text to be displayed when it is wrong
document.getElementById("q1AnswerStatus").innerHTML = "Wrong Answer :(";
}
};
// Question 2
document.getElementById("q2Btn").onclick = function () {
if (document.getElementById("q2c2").checked) {
document.getElementById("q2AnswerStatus").innerHTML = "Correct Answer!";
} else {
document.getElementById("q2AnswerStatus").innerHTML = "Wrong Answer :(";
}
};
// Question 3
document.getElementById("q3Btn").onclick = function () {
if (document.getElementById("q3c2").checked) {
document.getElementById("q3AnswerStatus").innerHTML = "Correct Answer!";
} else {
document.getElementById("q3AnswerStatus").innerHTML = "Wrong Answer :(";
}
}; // Question 4
document.getElementById("q4Btn").onclick = function () {
if (document.getElementById("q4c2").checked) {
document.getElementById("q4AnswerStatus").innerHTML = "Correct Answer!";
} else {
document.getElementById("q4AnswerStatus").innerHTML = "Wrong Answer :(";
}
}; // Question 5
document.getElementById("q5Btn").onclick = function () {
if (document.getElementById("q5c1").checked) {
document.getElementById("q5AnswerStatus").innerHTML = "Correct Answer!";
} else {
document.getElementById("q5AnswerStatus").innerHTML = "Wrong Answer :(";
}
}; // Question 6
document.getElementById("q6Btn").onclick = function () {
if (document.getElementById("q6c1").checked) {
document.getElementById("q6AnswerStatus").innerHTML = "Correct Answer!";
} else {
document.getElementById("q6AnswerStatus").innerHTML = "Wrong Answer :(";
}
}; // Question 7
document.getElementById("q7Btn").onclick = function () {
if (document.getElementById("q7c1").checked) {
document.getElementById("q7AnswerStatus").innerHTML = "Correct Answer!";
} else {
document.getElementById("q7AnswerStatus").innerHTML = "Wrong Answer :(";
}
}; // Question 8
document.getElementById("q8Btn").onclick = function () {
if (document.getElementById("q8c2").checked) {
document.getElementById("q8AnswerStatus").innerHTML = "Correct Answer!";
} else {
document.getElementById("q8AnswerStatus").innerHTML = "Wrong Answer :(";
}
}; // Question 9
document.getElementById("q9Btn").onclick = function () {
if (document.getElementById("q9c1").checked) {
document.getElementById("q9AnswerStatus").innerHTML = "Correct Answer!";
} else {
document.getElementById("q9AnswerStatus").innerHTML = "Wrong Answer :(";
}
};
</script>
```
## The Good and Bad Traits of Energy Sources
Now that we understand each of the energy sources, it is important to weigh the good and bad traits of each one. An energy source is efficient if it achieves maximum productivity with minimum wasted effort or expense. Note that the bad traits of an energy source are usually negative side effects that we try to lessen or prevent while gathering usable energy.
<img src="https://thesolarscoop.com/wp-content/uploads/2018/03/Solar.jpg" style="margin: 0 auto; width: 1000px;">
#### Source Image: EcoFasten, March 2018. Retrieved from https://thesolarscoop.com/wp-content/uploads/2018/03/Solar
```
%%html
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>
<h1>Solar</h1>
<p></p>
<table style="width:100%" align="left">
<tr>
<th style="text-align:center">Good Traits</th>
<th style="text-align:center">Bad Traits</th>
</tr>
<tr>
<td style="text-align:left">Solar energy has recently experienced decreasing costs and high public support. </td>
<td style="text-align:left">Solar energy is intermittent, i.e. electricity production is dependent on sunlight.</td>
</tr>
<tr>
<td style="text-align:left">Low CO2 emissions.</td>
<td style="text-align:left">Expensive but in recent years the cost of solar energy equipment has decreased.</td>
</tr>
<tr>
<td style="text-align:left">Easy to install, little operation and maintenance work.</td>
<td style="text-align:left">Forecasts are more unpredictable in comparison to fossil fuels (but better than wind).</td>
</tr>
</table>
<h3></h3>
<p></p>
</body>
</html>
from IPython.display import Image
Image(url= "https://ak5.picdn.net/shutterstock/videos/17748445/thumb/5.jpg", width=1000, height=300)
```
#### Source Image: Shutterstock, n.d. Retrieved from https://www.shutterstock.com/video/clip-17748445-close-up-industrial-oil-pump-jack-working
```
%%html
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>
<h1>Oil</h1>
<p></p>
<table style="width:100%">
<tr>
<th style="text-align:center">Good Traits</th>
<th style="text-align:center">Bad Traits</th>
</tr>
<tr>
<td style="text-align:left">Oil is cheap to produce and refine. </td>
<td style="text-align:left">Burning oil for electricity is a major source of air pollution on Earth and leads to health concerns and environmental damage. </td>
</tr>
<tr>
<td style="text-align:left">Unlike the renewable energy sources such as solar and wind energy that are weather dependent sources of power, Oil represents a reliable, ready-to-use source of energy.</td>
<td style="text-align:left">Burning oil for energy releases harmful gases into the atmosphere such as carbon dioxide (CO2), carbon monoxide (CO), nitrogen oxides (NOx), and sulfur dioxide (SO2, causes acid rain). </td>
</tr>
<tr>
<td></td>
<td style="text-align:left">Despite the fact that oil energy can get jobs done in a less expensive way, it is not a renewable source of energy. There will come a time when we run out of supply.</td>
</tr>
</table>
<h3></h3>
<p></p>
</body>
</html>
from IPython.display import Image
Image(filename="images/gasmap.jpg", width=1000, height=300)
```
#### Source Image: Studentenergy, n.d. Retrieved from https://www.studentenergy.org/topics/natural-gas
```
%%html
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>
<h1>Natural Gas</h1>
<p></p>
<table style="width:100%">
<tr>
<th style="text-align:center">Good Traits</th>
<th style="text-align:center">Bad Traits</th>
</tr>
<tr>
<td style="text-align:left">Emits the least CO2 compared to the other forms of non-renewable fossil fuels.</td>
<td style="text-align:left">Gas drilling has a negative impact on the environment.</td>
</tr>
<tr>
<td style="text-align:left"> Natural gas hot water heaters typically heat water twice as fast as electric heaters.</td>
<td style="text-align:left">Some regions that sell natural gas face political instability. This usually occurs when a country is dependent on natural gas as their only source of income. </td>
</tr>
<tr>
<td></td>
<td style="text-align:left">Natural gas is the more expensive energy source in comparison to other fossil fuels.</td>
</tr>
</table>
<h3></h3>
<p></p>
</body>
</html>
from IPython.display import Image
Image(url= "https://images.theconversation.com/files/125332/original/image-20160606-26003-1hjtcr5.jpg?ixlib=rb-1.1.0&q=45&auto=format&w=496&fit=clip", width=1000, height=100)
```
#### Source Image: The Conversation, June 2016. Retrieved from http://theconversation.com/is-coal-the-only-way-to-deal-with-energy-poverty-in-developing-economies-54163
```
%%html
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>
<h1>Coal</h1>
<p></p>
<table style="width:100%">
<tr>
<th style="text-align:center">Good Traits</th>
<th style="text-align:center">Bad Traits</th>
</tr>
<tr>
<td style="text-align:left">Coal provides stable and large-scale electricity generation.</td>
<td style="text-align:left">Coal power plants emit high levels of CO2.</td>
</tr>
<tr>
<td style="text-align:left">Coal power has a competitive production cost. Fuel costs are low and coal markets are well-functioning.</td>
<td style="text-align:left">Technologies to reduce coal power plant CO2 emissions are expensive.</td>
</tr>
<tr>
<td></td>
<td style="text-align:left">Coal mining impacts the landscape and infrastructure leading to erosion and displacement of animals from their natural habitats.</td>
</tr>
</table>
</body>
</html>
from IPython.display import Image
Image(url= "https://media.nationalgeographic.org/assets/photos/000/317/31713.jpg", width=1000, height=100)
```
#### Source Image: National Geographic, Photographs by USDA, V. Zutshi, S. Beaugez, M. Hendrikx, S. Heydt, M. Oeltjenbruns, A. Munoraharjo, F. Choudhury, G. Upton, O. Siudak, M. Gunther, R. Singh. Retrieved from https://www.nationalgeographic.org/photo/2biomass-crops-dup/
```
%%html
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>
<h1>Biomass</h1>
<p></p>
<table style="width:100%">
<tr>
<th style="text-align:center">Good Traits</th>
<th style="text-align:center">Bad Traits</th>
</tr>
<tr>
<td style="text-align:left">Biomass resources are abundant, cost-effective and political risk is limited.</td>
<td style="text-align:left">Requires large storage space.</td>
</tr>
<tr>
<td style="text-align:left">By using biomass in power production instead of fossil fuels, CO2 emissions are significantly reduced.</td>
<td style="text-align:left">Burning of biomass still emits a fair level of CO2 and without proper management of biomass usage this CO2 could easily become a green house gas. </td>
</tr>
<tr>
<td style="text-align:left">Properly managed biomass is carbon neutral over time. If not done in a sustainable way, biomass burning is doing more harm than good.</td>
<td></td>
</tr>
</table>
</body>
</html>
from IPython.display import Image
Image(url= "https://d32r1sh890xpii.cloudfront.net/article/718x300/1ffb18f07cf19289be69259800495f00.jpg", width=1000, height=300)
```
#### Source Image: Oilprice, n.d. Retrieved from https://oilprice.com/Alternative-Energy/Wind-Power/US-Wind-Energy-Demand-Surges.html
```
%%html
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>
<h1>Wind</h1>
<p></p>
<table style="width:100%">
<tr>
<th style="text-align:center">Good Traits</th>
<th style="text-align:center">Bad Traits</th>
</tr>
<tr>
<td style="text-align:left">Wind power emits essentially no CO2 across its life cycle.</td>
<td style="text-align:left">Has an impact on the landscape, wildlife and also emits noise.</td>
</tr>
<tr>
<td style="text-align:left">Has no fuel costs.</td>
<td style="text-align:left">Dependent on available wind.</td>
</tr>
<tr>
<td></td>
<td style="text-align:left">Has significant investment costs.</td>
</tr>
</table>
</body>
</html>
from IPython.display import Image
Image(filename="images/hydroelectric.jpg")
```
#### Source Image: What is Hydroelectric Power Plant? How Does It Work?, Jul. 2020. Retrieved from https://www.usgs.gov/media/images/flow-water-produces-hydroelectricity
```
%%html
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>
<h1>Hydro</h1>
<table style="width:100%">
<tr>
<th style="text-align:center">Good Traits</th>
<th style="text-align:center">Bad Traits</th>
</tr>
<tr>
<td style="text-align:left">Hydro power has almost no emissions that impact the climate or the environment.</td>
<td style="text-align:left">Hydro power plants are a significant encroachment on the landscape and impact river ecosystems.</td>
</tr>
<tr>
<td style="text-align:left">Provides large-scale and stable electricity generation.</td>
<td style="text-align:left">Constructing a new hydro power plant requires a substantial investment.</td>
</tr>
<tr>
<td style="text-align:left">Has no fuel costs. Hydro power plants have a long economic life.</td>
<td></td>
</tr>
</table>
</body>
</html>
from IPython.display import Image
Image(url= "https://images.theconversation.com/files/178921/original/file-20170719-13558-rs7g2s.jpg?ixlib=rb-1.1.0&rect=0%2C532%2C4000%2C2377&q=45&auto=format&w=496&fit=clip", width=1000, height=300)
```
#### Source Image: Harga, n.d. Retrieved from https://www.tokoonlineindonesia.id/small-nuclear-power-reactors-future-or-folly.html
```
%%html
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>
<h1>Nuclear</h1>
<p></p>
<table style="width:100%">
<tr>
<th style="text-align:center">Good Traits</th>
<th style="text-align:center">Bad Traits</th>
</tr>
<tr>
<td style="text-align:left">Nuclear power emits low levels of CO2 across its life cycle.</td>
<td style="text-align:left">The management of high-level waste requires storage in secure facilities for a very long time.</td>
</tr>
<tr>
<td style="text-align:left">Provides stable and large-scale electricity generation.</td>
<td style="text-align:left">Construction of a new nuclear power plant requires major investments.</td>
</tr>
<tr>
<td style="text-align:left">Costs for fuel, operation and maintenance are normally relatively low.</td>
<td style="text-align:left">If nuclear waste spills or is handled incorrectly it has serious effects on the environment. </td>
</tr>
</table>
</body>
</html>
from IPython.display import Image
Image(url= "https://www.longrefrigeration.com/wp-content/uploads/2017/06/Depositphotos_59228621_s-2015.jpg", width=1000, height=100)
```
#### Source Image: Long Heating and Cooling Geothermal Heat Pumps, June 2017. Retrieved from https://www.longrefrigeration.com/how-geothermal-energy-works/depositphotos_59228621_s-2015/
```
%%html
<head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
text-align: left;
}
</style>
</head>
<body>
<h1>Geothermal</h1>
<p></p>
<table style="width:100%">
<tr>
<th style="text-align:center">Good Traits</th>
<th style="text-align:center">Bad Traits</th>
</tr>
<tr>
<td style="text-align:left">It only requires heat from the earth to work, a limitless supply.</td>
<td style="text-align:left">High costs to construct geothermal plants.</td>
</tr>
<tr>
<td style="text-align:left">It is simple and reliable, unlike the unpredictability of solar or wind energy.</td>
<td style="text-align:left">Sites must be located in prime areas, requiring long distance transportation of the resourse through pipe, which is often costly.</td>
</tr>
<tr>
<td style="text-align:left">It is a domestic source of energy found throughout the world. This means that geothermal energy is used in many households across the world, mainly for heating/cooling systems. </td>
<td style="text-align:left">Emits some sulfur dioxide (SO2). </td>
</tr>
</table>
</body>
</html>
```
## Conclusion
In this notebook students learned about the nine most popular sources of energy. Students should now have a clearer understanding of the differences between renewable and non-renewable energy sources. Weighing the good and bad traits of each energy source also prompted them to think about its efficiency.
### And now a *"FeW FUn ENeRGy JokES"* to conclude!
* What did Godzilla say when he ate the nuclear power plant?
“Shocking!”
* Why did the lights go out?
Because they liked each other!
* What would a barefooted man get if he steps on an electric wire?
A pair of shocks!
* Why is wind power popular?
Because it has a lot of fans!
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
[](https://colab.research.google.com/github/guilbera/colorizing/blob/main/notebooks/pytorch_implementation/pix2pix_model.ipynb)
```
import torch
from torch import nn
from torchsummary import summary
class Generator(nn.Module):
"""Generator of the Pix2Pix model.
For the Lab version, nb_output_channels=2
For the RGB version, nb_output_channels=3"""
def __init__(self, nb_output_channels):
super(Generator, self).__init__()
self.relu = nn.ReLU()
self.leakyrelu = nn.LeakyReLU()
if nb_output_channels == 2:
self.activation = nn.Tanh()
elif nb_output_channels == 3:
self.activation = nn.Sigmoid()
self.conv2d_1 = nn.Conv2d(in_channels=1,out_channels=64,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_1 = nn.BatchNorm2d(64)
self.conv2d_2 = nn.Conv2d(in_channels=64,out_channels=128,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_2 = nn.BatchNorm2d(128)
self.conv2d_3 = nn.Conv2d(in_channels=128,out_channels=256,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_3 = nn.BatchNorm2d(256)
self.conv2d_4 = nn.Conv2d(in_channels=256,out_channels=512,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_4 = nn.BatchNorm2d(512)
self.conv2d_5 = nn.Conv2d(in_channels=512,out_channels=512,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_5 = nn.BatchNorm2d(512)
self.conv2d_6 = nn.Conv2d(in_channels=512,out_channels=512,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_6 = nn.BatchNorm2d(512)
self.conv2d_7 = nn.Conv2d(in_channels=512,out_channels=512,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_7 = nn.BatchNorm2d(512)
self.conv2d_8 = nn.Conv2d(in_channels=512,out_channels=512,kernel_size=4,stride=2, padding=1, bias=False)
self.conv2d_9 = nn.ConvTranspose2d(in_channels=512,out_channels=512,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_9 = nn.BatchNorm2d(512)
self.conv2d_10 = nn.ConvTranspose2d(in_channels=512*2,out_channels=512,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_10 = nn.BatchNorm2d(512)
self.conv2d_11 = nn.ConvTranspose2d(in_channels=512*2,out_channels=512,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_11 = nn.BatchNorm2d(512)
self.conv2d_12 = nn.ConvTranspose2d(in_channels=512*2,out_channels=512,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_12 = nn.BatchNorm2d(512)
self.conv2d_13 = nn.ConvTranspose2d(in_channels=512*2,out_channels=256,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_13 = nn.BatchNorm2d(256)
self.conv2d_14 = nn.ConvTranspose2d(in_channels=256*2,out_channels=128,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_14 = nn.BatchNorm2d(128)
self.conv2d_15 = nn.ConvTranspose2d(in_channels=128*2,out_channels=64,kernel_size=4,stride=2, padding=1, bias=False)
self.batchnorm_15 = nn.BatchNorm2d(64)
self.conv2d_16 = nn.ConvTranspose2d(in_channels=64*2,out_channels=nb_output_channels,kernel_size=4,stride=2, padding=1, bias=True)
def forward(self, encoder_input):
#encoder
encoder_output_1 = self.leakyrelu(self.conv2d_1(encoder_input))
encoder_output_2 = self.leakyrelu(self.batchnorm_2(self.conv2d_2(encoder_output_1)))
encoder_output_3 = self.leakyrelu(self.batchnorm_3(self.conv2d_3(encoder_output_2)))
encoder_output_4 = self.leakyrelu(self.batchnorm_4(self.conv2d_4(encoder_output_3)))
encoder_output_5 = self.leakyrelu(self.batchnorm_5(self.conv2d_5(encoder_output_4)))
encoder_output_6 = self.leakyrelu(self.batchnorm_6(self.conv2d_6(encoder_output_5)))
encoder_output_7 = self.leakyrelu(self.batchnorm_7(self.conv2d_7(encoder_output_6)))
encoder_output = self.conv2d_8(encoder_output_7)
#decoder
decoder_output = self.batchnorm_9(self.conv2d_9(self.relu(encoder_output)))
decoder_output = self.batchnorm_10(self.conv2d_10(self.relu(torch.cat([encoder_output_7,decoder_output],1)))) #skip connection
decoder_output = self.batchnorm_11(self.conv2d_11(self.relu(torch.cat([encoder_output_6,decoder_output],1)))) #skip connection
decoder_output = self.batchnorm_12(self.conv2d_12(self.relu(torch.cat([encoder_output_5,decoder_output],1)))) #skip connection
decoder_output = self.batchnorm_13(self.conv2d_13(self.relu(torch.cat([encoder_output_4,decoder_output],1)))) #skip connection
decoder_output = self.batchnorm_14(self.conv2d_14(self.relu(torch.cat([encoder_output_3,decoder_output],1)))) #skip connection
decoder_output = self.batchnorm_15(self.conv2d_15(self.relu(torch.cat([encoder_output_2,decoder_output],1)))) #skip connection
decoder_output = self.activation(self.conv2d_16(self.relu(torch.cat([encoder_output_1,decoder_output],1)))) #skip connection
return decoder_output
class Discriminator(nn.Module):
"""Patch discriminator of the Pix2Pix model."""
def __init__(self):
super(Discriminator, self).__init__()
self.leakyrelu = nn.LeakyReLU(0.2, True)
self.sigmoid = nn.Sigmoid()
self.conv2d_1 = nn.Conv2d(in_channels=3,out_channels=64,kernel_size=4,stride=2,padding=1, bias=True)
self.conv2d_2 = nn.Conv2d(in_channels=64,out_channels=128,kernel_size=4,stride=2,padding=1, bias=False)
self.batchnorm_2 = nn.BatchNorm2d(128)
self.conv2d_3 = nn.Conv2d(in_channels=128,out_channels=256,kernel_size=4,stride=2,padding=1, bias=False)
self.batchnorm_3 = nn.BatchNorm2d(256)
self.conv2d_4 = nn.Conv2d(in_channels=256,out_channels=512,kernel_size=4,stride=1,padding=1, bias=False)
self.batchnorm_4 = nn.BatchNorm2d(512)
self.conv2d_5 = nn.Conv2d(in_channels=512,out_channels=1,kernel_size=4,stride=1,padding=1,bias=True)
def forward(self, input):
output = self.leakyrelu(self.conv2d_1(input))
output = self.leakyrelu(self.batchnorm_2(self.conv2d_2(output)))
output = self.leakyrelu(self.batchnorm_3(self.conv2d_3(output)))
output = self.leakyrelu(self.batchnorm_4(self.conv2d_4(output)))
output = self.sigmoid(self.conv2d_5(output))
return output
@torch.no_grad()
def init_weights(m, gain=0.02):
"""weight initialisation of the different layers of the Generator and Discriminator"""
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
nn.init.normal_(m.weight.data, mean=0.0, std=gain)
if m.bias is not None:
nn.init.constant_(m.bias.data, 0.0)
elif type(m) == nn.BatchNorm2d:
nn.init.normal_(m.weight.data, 1., gain)
nn.init.constant_(m.bias.data, 0.)
class DiscriminatorLoss(nn.Module):
"""for the patch discriminator, the output is a 30x30 tensor
if the image is real, it should return all ones 'real_labels'
if the image is fake, it should return all zeros 'fake_labels'
returns the MSE loss between the output of the discriminator and the label"""
def __init__(self, device):
super().__init__()
self.register_buffer('real_labels', torch.ones([30,30], requires_grad=False, device=device), False)
self.register_buffer('fake_labels', torch.zeros([30,30], requires_grad=False, device=device), False)
#use MSE loss for the discriminator
self.loss = nn.MSELoss()
def forward(self, predictions, target_is_real):
if target_is_real:
target = self.real_labels
else:
target = self.fake_labels
return self.loss(predictions, target.expand_as(predictions))
```
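As a quick illustration of how these pieces fit together, here is a minimal usage sketch. It is not part of the original notebook: the dummy tensors, batch size and device handling are assumptions, and it follows the Lab setup, in which the patch discriminator sees the grayscale L channel concatenated with the two predicted colour channels.

```
# Minimal usage sketch (assumptions: random dummy data, batch size 4).
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

net_G = Generator(nb_output_channels=2).to(device)  # Lab version: predicts the a,b channels
net_D = Discriminator().to(device)
net_G.apply(init_weights)                           # initialise conv / batchnorm weights
net_D.apply(init_weights)

L = torch.rand(4, 1, 256, 256, device=device)       # grayscale (L channel) input batch
fake_ab = net_G(L)                                  # -> shape (4, 2, 256, 256)

# The discriminator input is the condition (L) concatenated with the colour channels.
d_out = net_D(torch.cat([L, fake_ab], dim=1))       # -> shape (4, 1, 30, 30) patch scores

criterion = DiscriminatorLoss(device)
loss_fake = criterion(d_out, target_is_real=False)  # discriminator loss on generated images
print(fake_ab.shape, d_out.shape, loss_fake.item())
```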
### Summary of the models
```
show_summary = False
if show_summary:
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
device = torch.device(dev)
net_G = Generator(2).to(device)
net_D = Discriminator().to(device)
summary(net_G, (1, 256, 256))
summary(net_D, (3, 256, 256))
```
# Training the rough Bergomi model part 2
In this notebook we train a neural network for the rough Bergomi model, covering expiries in the range (0.008, 0.03].
Be aware that the datasets are rather large.
### Load, split and scale the datasets
```
MODEL_PART = str(2)
import os, pandas as pd, numpy as np
wd = os.getcwd()
path_seperator = os.path.sep
# Load contract grid:
logMoneyness = pd.read_csv(wd + path_seperator + 'data' + path_seperator + 'logMoneyness.txt', delimiter=",", header = None).values
expiries = pd.read_csv(wd + path_seperator + 'data' + path_seperator + 'expiries.txt', delimiter=",", header = None).values
# Set useful parameters:
nIn = 12
nOut = 150
nXi = 9
# Load training data:
data_train = pd.read_csv(wd + path_seperator + 'data' + path_seperator + 'training_and_test_data' + path_seperator + 'rbergomi_training_data_' + MODEL_PART + '.csv', delimiter=",").values
x_train = data_train[:,:nIn]
y_train = data_train[:,nIn:nIn+nOut]
data_train = None
# Load test data:
data_test = pd.read_csv(wd + path_seperator + 'data' + path_seperator + 'training_and_test_data' + path_seperator + 'rbergomi_test_data_' + MODEL_PART + '.csv', delimiter=",").values
x_valid = data_test[:,:nIn]
y_valid = data_test[:,nIn:nIn+nOut]
data_test = None
# Normalise data:
from sklearn.preprocessing import StandardScaler
tmp1 = np.reshape(np.array([0.50,3.50,0.00]), (1, 3))
tmp2 = np.reshape(np.array([0.00,0.75,-1.00]), (1, 3))
ub = np.concatenate((tmp1,np.tile(1,(1,nXi))),1)
lb = np.concatenate((tmp2,np.tile(0.0025,(1,nXi))),1)
def myscale(x):
res=np.zeros(nIn)
for i in range(nIn):
res[i]=(x[i] - (ub[0,i] + lb[0,i])*0.5) * 2 / (ub[0,i] - lb[0,i])
return res
def myinverse(x):
res=np.zeros(nIn)
for i in range(nIn):
res[i]=x[i]*(ub[0,i] - lb[0,i]) *0.5 + (ub[0,i] + lb[0,i])*0.5
return res
# Scale inputs:
x_train_mod = np.array([myscale(x) for x in x_train])
x_valid_mod = np.array([myscale(x) for x in x_valid])
# Scale and normalise output:
scale_y = StandardScaler()
y_train_mod = scale_y.fit_transform(y_train)
y_valid_mod = scale_y.transform(y_valid)
```
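The `myscale` and `myinverse` helpers are a plain affine map: each parameter is shifted by the midpoint of its bounds and divided by half the bound width, so the scaled inputs lie in [-1, 1], and `myinverse` undoes the map. The sanity check below is a sketch added for illustration, assuming the cell above has been run; the parameter vector is made up and simply chosen to lie inside the bounds `lb` and `ub`.

```
# Sanity check of the scaling helpers (made-up parameter vector inside the bounds).
params = np.concatenate(([0.10, 1.90, -0.90], np.full(nXi, 0.04)))  # (H, eta, rho, xi1,...,xi9)
scaled = myscale(params)
assert scaled.min() >= -1 and scaled.max() <= 1   # scaled inputs lie in [-1, 1]
assert np.allclose(myinverse(scaled), params)     # the round trip recovers the original inputs
print(scaled)
```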
### Define utility functions
```
import keras
from keras.layers import Activation
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
keras.backend.set_floatx('float64')
def GetNetwork(nIn,nOut,nNodes,nLayers,actFun):
# Description: Creates a neural network of a specified structure
input1 = keras.layers.Input(shape=(nIn,))
layerTmp = keras.layers.Dense(nNodes,activation = actFun)(input1)
for i in range(nLayers-1):
layerTmp = keras.layers.Dense(nNodes,activation = actFun)(layerTmp)
output1 = keras.layers.Dense(nOut,activation = 'linear')(layerTmp)
return(keras.models.Model(inputs=input1, outputs=output1))
def TrainNetwork(nn,batchsize,numEpochs,objFun,optimizer,xTrain,yTrain,xTest,yTest):
# Description: Trains a neural network and returns the network including the history
# of the training process.
nn.compile(loss = objFun, optimizer = optimizer)
history = nn.fit(xTrain, yTrain, batch_size = batchsize,
validation_data = (xTest,yTest),
epochs = numEpochs, verbose = True, shuffle=1)
return nn,history.history['loss'],history.history['val_loss']
def root_mean_squared_error(y_true, y_pred):
return K.sqrt(K.mean(K.square( y_pred - y_true )))
```
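The custom objective defined above is the usual root-mean-squared error,
$$\mathrm{RMSE}(y, \hat{y}) = \sqrt{\frac{1}{N}\sum_{i=1}^{N}\bigl(\hat{y}_i - y_i\bigr)^2},$$
written with Keras backend operations so that it can be used directly as the training loss.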
### Define and train neural network
<span style="color:red">This section can be skipped! Just go straight to "Load network" and load the already trained model</span>
```
# Define model:
model = GetNetwork(nIn,nOut,200,3,'elu')
# Set seed
import random
random.seed(455165)
# Train network
model,loss1,vloss1 = TrainNetwork(model,32,500,root_mean_squared_error,'adam',x_train_mod,y_train_mod,x_valid_mod,y_valid_mod)
model,loss2,vloss2 = TrainNetwork(model,5000,200,root_mean_squared_error,'adam',x_train_mod,y_train_mod,x_valid_mod,y_valid_mod)
```
### Save network
<span style="color:red">This section can be skipped! Just go straight to "Load network" and load the already trained model</span>
```
# Save model:
model.save(wd + path_seperator + 'data' + path_seperator + 'neural_network_weights' + path_seperator + 'rbergomi_model_' + MODEL_PART + '.h5')
# Save weights (and scalings) in JSON format:
# - You need to install 'json-tricks' first.
# - We need this file for proper import into Matlab, R... etc.
weights_and_more = model.get_weights()
weights_and_more.append(0.5*(ub + lb))
weights_and_more.append(np.power(0.5*(ub - lb),2))
weights_and_more.append(scale_y.mean_)
weights_and_more.append(scale_y.var_)
import codecs, json
for idx, val in enumerate(weights_and_more):
weights_and_more[idx] = weights_and_more[idx].tolist()
json_str = json.dumps(weights_and_more)
text_file = open(wd + path_seperator + 'data' + path_seperator + 'neural_network_weights' + path_seperator + 'rbergomi_weights_' + MODEL_PART + '.json', "w")
text_file.write(json_str)
text_file.close()
```
### Load network
```
# Load already trained neural network:
model = keras.models.load_model(wd + path_seperator + 'data' + path_seperator + 'neural_network_weights' + path_seperator + 'rbergomi' + path_seperator + 'rbergomi_model_' + MODEL_PART + '.h5',
custom_objects={'root_mean_squared_error': root_mean_squared_error})
```
### Validate approximation
```
# Specify test sample to plot:
sample_ind = 5006
# Print parameters of test sample:
print("Model Parameters (H,eta,rho,xi1,xi2,...): ",myinverse(x_valid_mod[sample_ind,:]))
import scipy, matplotlib.pyplot as plt
npts = 25
x_sample = x_valid_mod[sample_ind,:]
y_sample = y_valid_mod[sample_ind,:]
prediction = scale_y.inverse_transform(model.predict(x_valid_mod))
plt.figure(1,figsize=(14,12))
j = -1
for i in range(0,6):
j = j + 1
plt.subplot(4,4,j+1)
plt.plot(logMoneyness[i*npts:(i+1)*npts],y_valid[sample_ind,i*npts:(i+1)*npts],'b',label="True")
plt.plot(logMoneyness[i*npts:(i+1)*npts],prediction[sample_ind,i*npts:(i+1)*npts],'--r',label=" Neural network")
plt.title("Maturity=%1.3f "%expiries[i*npts])
plt.xlabel("log-moneyness")
plt.ylabel("Implied volatility")
plt.legend()
plt.tight_layout()
plt.show()
```
### Save model predictions
```
import pandas as pd
prediction = scale_y.inverse_transform(model.predict(x_valid_mod))
dl = {'prediction' + MODEL_PART: prediction.flatten(), 'y_valid' + MODEL_PART: y_valid.flatten()}
df = pd.DataFrame(data=dl)
df.to_csv(wd + path_seperator + 'data' + path_seperator + 'neural_network_weights' + path_seperator + 'rbergomi' + path_seperator + 'predictions' + path_seperator + 'pred' + MODEL_PART + '.csv', encoding='utf-8', index=False)
y_train.shape
```
```
%pylab inline
%config InlineBackend.figure_format = 'retina'
from ipywidgets import interact
```
Turn in an image (e.g., screenshot) or PDF copy of any code that is part of your answer. Make sure all images and PDF pages are properly rotated. Make sure that all pages are clearly visible.
Tips: Use the document scanner function on your smart phone to take better page "scans" using your camera. Make sure your screen is not shifted toward warmer colours (some devices filter blue light at night) giving it a dim and orange appearance.
# Q1
Let $Q$ be an orthogonal $m \times m$ matrix and $\hat{R}$ be an $n\times n$ upper triangular matrix, $m>n$, such that
$$A = Q
\begin{bmatrix}
\hat{R} \\ \mathbf{0}
\end{bmatrix}
$$
## A
Show that if the diagonal elements of $\hat{R}$ are all nonzero, then $A$ is full rank.
## B
Show that if $A$ is full rank, then the diagonal elements of $\hat{R}$ are all nonzero.
## C
Let $\hat{Q}$ be $m\times n$ with orthonormal columns (so that $\hat{Q}^T\hat{Q} = I$ but $\hat{Q}$ is not invertible) such that
$$ A = \hat{Q}\hat{R} .$$
Repeat parts A and B for the above reduced QR decomposition.
# Q2
Let $A\in \mathbb{R}^{m\times n}$ ($m>n$) be full rank. Let the SVD decomposition of $A$ be written as
$$ A =
\begin{bmatrix}
\hat{U} & U_0
\end{bmatrix}
\begin{bmatrix}
\hat{\Sigma} \\ \mathbf{0}
\end{bmatrix}
V^T,
$$
where $\hat{U}$ is $m\times n$, $U_0$ is $m\times(m-n)$, $\hat{\Sigma}$ is $n\times n$, and $V$ is $n\times n$. Use the above SVD to derive a formula for the pseudo inverse of $A$ in terms of $\hat{U}$, $\hat{\Sigma}$, and $V$.
# Q3
Take $m=50$, $n=12$. Use the function `linspace` to produce an array $t$ of $m$ equally spaced points on $[0, 1]$. Using two loops (or whatever method you like), produce the $m \times n$ matrix
$$ A =
\begin{bmatrix}
1 & t_1 & t_1^2 & \cdots & t_1^{n-1} \\
1 & t_2 & t_2^2 & \cdots & t_2^{n-1} \\
\vdots & \vdots & \vdots & & \vdots \\
1 & t_m & t_m^2 & \cdots & t_m^{n-1}
\end{bmatrix}.
$$
Produce an array $b = \cos(4t)$ which has $m$ elements just like $t$. This can be done with the command `b = cos(4*t)`. Calculate the least squares solution $x$ to the equation $Ax = b$ using three different methods:
1. The normal equations using the function `cholesky`
2. The QR decomposition using the function `qr`
3. The SVD decomposition using the function `svd`
You may want to look at the help documentation for each of these functions (e.g., by using the command `help(cholesky)`). You will also want to use examples from the Week 9 Jupyter notebook, where you will find two functions: `backward_substitution` and `forward_substitution`. Copy these two functions into your homework notebook and use them to solve upper and lower triangular systems.
The calculations above will produce three lists of twelve coefficients. In each list, shade with red pen the digits that appear to be wrong (affected by rounding error). Comment on what differences you observe. Do the normal equations exhibit instability?
**Hint: $A$ is a Vandermonde matrix. When we were studying polynomial interpolation, I told you that we don't like working with this matrix directly. This is because it is ill-conditioned. You should be able to say exactly what that means now. What is the condition number of $A$? Is there a command to compute it in Python? What do we know about the stability of the three algorithms?**
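For orientation, here is a minimal sketch of the computations in plain NumPy/SciPy. It is only an outline, not the written answers the questions ask for, and `scipy.linalg.solve_triangular` is used here as a stand-in for the `backward_substitution`/`forward_substitution` helpers mentioned above.
```
import numpy as np
from numpy.linalg import cholesky, qr, svd
from scipy.linalg import solve_triangular

m, n = 50, 12
t = np.linspace(0, 1, m)
A = np.vander(t, n, increasing=True)      # columns 1, t, t^2, ..., t^(n-1)
b = np.cos(4 * t)

# 1) Normal equations via Cholesky: (A^T A) x = A^T b with A^T A = L L^T
L = cholesky(A.T @ A)
x_chol = solve_triangular(L.T, solve_triangular(L, A.T @ b, lower=True))

# 2) Reduced QR: solve R x = Q^T b by back substitution
Q, R = qr(A)                              # numpy returns the reduced factors by default
x_qr = solve_triangular(R, Q.T @ b)

# 3) SVD: x = V diag(1/sigma) U^T b
U, s, Vt = svd(A, full_matrices=False)
x_svd = Vt.T @ ((U.T @ b) / s)

print(np.linalg.cond(A))                  # the condition number asked about in the hint
```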
# Randomness and reproducibility
Random numbers and [stochastic processes](http://www2.econ.iastate.edu/tesfatsi/ace.htm#Stochasticity)
are essential to most agent-based models.
[Pseudo-random number generators](https://en.wikipedia.org/wiki/Pseudorandom_number_generator)
can be used to create numbers in a sequence that appears
random but is actually a deterministic sequence based on an initial seed value.
In other words, the generator will produce the same pseudo-random sequence
over multiple runs if it is given the same seed at the beginning.
Note that it is possible for the generators to draw the same number repeatedly,
as illustrated in this [comic strip](https://dilbert.com/strip/2001-10-25) from Scott Adams:

```
import agentpy as ap
import numpy as np
import random
```
## Random number generators
Agentpy models contain two internal pseudo-random number generators with different features:
- `Model.random` is an instance of `random.Random` (more info [here](https://realpython.com/python-random/))
- `Model.nprandom` is an instance of `numpy.random.Generator` (more info [here](https://numpy.org/devdocs/reference/random/index.html))
To illustrate, let us define a model that uses both generators to draw a random integer:
```
class RandomModel(ap.Model):
def setup(self):
self.x = self.random.randint(0, 99)
self.y = self.nprandom.integers(99)
self.report(['x', 'y'])
self.stop()
```
If we run this model multiple times, we will likely get a different series of numbers in each iteration:
```
exp = ap.Experiment(RandomModel, iterations=5)
results = exp.run()
results.reporters
```
## Defining custom seeds
If we want the results to be reproducible,
we can define a parameter `seed` that
will be used automatically at the beginning of a simulation
to initialize both generators.
```
parameters = {'seed': 42}
exp = ap.Experiment(RandomModel, parameters, iterations=5)
results = exp.run()
```
By default, the experiment will use this seed to generate different random seeds for each iteration:
```
results.reporters
```
Repeating this experiment will yield the same results:
```
exp2 = ap.Experiment(RandomModel, parameters, iterations=5)
results2 = exp2.run()
results2.reporters
```
Alternatively, we can set the argument `randomize=False` so that the experiment will use the same seed for each iteration:
```
exp3 = ap.Experiment(RandomModel, parameters, iterations=5, randomize=False)
results3 = exp3.run()
```
Now, each iteration yields the same results:
```
results3.reporters
```
## Sampling seeds
For a sample with multiple parameter combinations, we can treat the seed like any other parameter.
The following example will use the same seed for each parameter combination:
```
parameters = {'p': ap.Values(0, 1), 'seed': 0}
sample1 = ap.Sample(parameters, randomize=False)
list(sample1)
```
If we run an experiment with this sample,
the same iteration of each parameter combination will have the same seed (remember that the experiment will generate different seeds for each iteration by default):
```
exp = ap.Experiment(RandomModel, sample1, iterations=2)
results = exp.run()
results.reporters
```
Alternatively, we can use `Sample` with `randomize=True` (default)
to generate random seeds for each parameter combination in the sample.
```
sample3 = ap.Sample(parameters, randomize=True)
list(sample3)
```
This will always generate the same set of random seeds:
```
sample3 = ap.Sample(parameters)
list(sample3)
```
An experiment will now have different results for every parameter combination and iteration:
```
exp = ap.Experiment(RandomModel, sample3, iterations=2)
results = exp.run()
results.reporters
```
Repeating this experiment will yield the same results:
```
exp = ap.Experiment(RandomModel, sample3, iterations=2)
results = exp.run()
results.reporters
```
## Stochastic methods of AgentList
Let us now look at some stochastic operations that are often used in agent-based models.
To start, we create a list of five agents:
```
model = ap.Model()
agents = ap.AgentList(model, 5)
agents
```
If we look at the agents' ids, we see that they have been created in order:
```
agents.id
```
To shuffle this list, we can use `AgentList.shuffle`:
```
agents.shuffle().id
```
To create a random subset, we can use `AgentList.random`:
```
agents.random(3).id
```
And if we want it to be possible to select the same agent more than once:
```
agents.random(6, replace=True).id
```
## Agent-specific generators
For more advanced applications, we can create separate generators for each object.
We can ensure that the seeds of each object follow a controlled pseudo-random sequence by using the model's main generator to generate the seeds.
```
class RandomAgent(ap.Agent):
def setup(self):
seed = self.model.random.getrandbits(128) # Seed from model
self.random = random.Random(seed) # Create agent generator
self.x = self.random.random() # Create a random number
class MultiRandomModel(ap.Model):
def setup(self):
self.agents = ap.AgentList(self, 2, RandomAgent)
self.agents.record('x')
self.stop()
parameters = {'seed': 42}
exp = ap.Experiment(
MultiRandomModel, parameters, iterations=2,
record=True, randomize=False)
results = exp.run()
results.variables.RandomAgent
```
Alternatively, we can also have each agent start from the same seed:
```
class RandomAgent2(ap.Agent):
def setup(self):
self.random = random.Random(self.p.agent_seed) # Create agent generator
self.x = self.random.random() # Create a random number
class MultiRandomModel2(ap.Model):
def setup(self):
self.agents = ap.AgentList(self, 2, RandomAgent2)
self.agents.record('x')
self.stop()
parameters = {'agent_seed': 42}
exp = ap.Experiment(
MultiRandomModel2, parameters, iterations=2,
record=True, randomize=False)
results = exp.run()
results.variables.RandomAgent2
```
```
from utils import config, sample_utils as su, parse_midas_data
from collections import defaultdict
import numpy as np, math
from matplotlib import pyplot as plt
import bz2
sample_subject_map = su.parse_sample_subject_map()
subject_sample_map = su.parse_subject_sample_map()
sample_order_map = su.parse_sample_order_map()
infant_samples = su.get_sample_names('infant')
olm_samples = su.get_sample_names('olm')
infant_samples = [sample for sample in infant_samples if sample not in olm_samples]
samples = su.get_sample_names('backhed')
good_species_list = parse_midas_data.load_pickled_good_species_list()
# Load alpha diversity, richness at different timepoints
# Relative abundance file
relab_fpath = "%s/species/relative_abundance.txt.bz2" % (config.data_directory)
relab_file = open(relab_fpath, 'r')
decompressor = bz2.BZ2Decompressor()
raw = decompressor.decompress(relab_file.read())
data = [row.split('\t') for row in raw.split('\n')]
data.pop() # Get rid of extra element due to terminal newline
header = su.parse_merged_sample_names(data[0])
# Generate alpha diversity dictionary
alpha_div_dict = {}
richness_dict = {}
relab_dict = defaultdict(dict) # sample -> species -> relab
for i in range(1, len(header)):
sample = header[i]
for row in data[1:]:
species = row[0]
rel_ab = float(row[i])
if rel_ab > 0:
relab_dict[sample][species] = rel_ab
acc = 0
richness = 0
for row in data[1:]:
rel_ab = float(row[i])
if rel_ab != 0:
acc += (rel_ab * math.log(rel_ab))
richness += 1
alpha_div_dict[header[i]] = (acc*-1)
richness_dict[header[i]] = richness
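# The loop above computes the Shannon index: alpha diversity = -sum_s p_s * ln(p_s) over species
# with nonzero relative abundance p_s; richness counts those species.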
subjects = set([sample_subject_map[sample][:-2] for sample in samples])
backhed_order_tp_dict = {1: 'Birth', 2: 'Month 4', 3: 'Month 12'}
# Reformat into subject -> tp -> alpha diversity
subject_tp_alpha_div = {subject: defaultdict(dict) for subject in subjects}
for sample in samples:
subject, mi_type = sample_subject_map[sample].split('-')
order = sample_order_map[sample][1]
if mi_type == 'M':
tp = 'Mother'
elif mi_type == 'I':
tp = backhed_order_tp_dict[order]
subject_tp_alpha_div[subject][tp] = alpha_div_dict[sample]
# Load data from Ricky
data_dir = config.data_directory
f = open('%s/strains/Backhed_2015_strain_number.csv' % data_dir, 'r')
f.readline()
all_species = set()
sample_species_strain_count = defaultdict(dict)
possible_counts = set()
for line in f:
sample, species, count = line.strip().split(',')
sample_species_strain_count[sample][species] = int(count)
possible_counts.add(int(count))
all_species.add(species)
subjects = set([sample_subject_map[sample][:-2] for sample in sample_species_strain_count])
backhed_order_tp_dict = {1: 'Birth', 2: 'Month 4', 3: 'Month 12'}
# Reformat into subject -> species -> tp -> count
subject_tp_strain_count = {subject: defaultdict(dict) for subject in subjects}
# subject -> tp -> strain richness
subject_tp_strain_richness = {subject: {} for subject in subjects}
for sample in sample_species_strain_count:
subject, mi_type = sample_subject_map[sample].split('-')
order = sample_order_map[sample][1]
if mi_type == 'M':
tp = 'Mother'
elif mi_type == 'I':
tp = backhed_order_tp_dict[order]
strain_richness = 0
for species in sample_species_strain_count[sample]:
count = sample_species_strain_count[sample][species]
subject_tp_strain_count[subject][species][tp] = count
strain_richness += count
subject_tp_strain_richness[subject][tp] = strain_richness
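# Strain richness of a sample = total number of strains summed over all species detected in that sample.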
tps_ordered = ['Mother', 'Birth', 'Month 4', 'Month 12']
tp_x_dict = {'Mother': 0, 'Birth': 1, 'Month 4': 2, 'Month 12': 3}
xs = np.arange(len(tps_ordered))
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
for subject in subject_tp_alpha_div:
alpha_divs = []
xs_subset = []
for tp in tps_ordered:
if tp in subject_tp_alpha_div[subject]:
alpha_divs.append(subject_tp_alpha_div[subject][tp])
xs_subset.append(tp_x_dict[tp])
ax[0].plot(xs_subset, alpha_divs, '.-', alpha=0.5)
ax[0].set_ylabel("Species alpha diversity")
ax[0].set_xticks(xs)
ax[0].set_xticklabels(tps_ordered)
ax[0].text(-0.165, 0.92, 'A', size=20, transform=ax[0].transAxes, weight='bold')
for subject in subject_tp_strain_richness:
richnesses = []
xs_subset = []
for tp in tps_ordered:
if tp in subject_tp_strain_richness[subject]:
richnesses.append(subject_tp_strain_richness[subject][tp])
xs_subset.append(tp_x_dict[tp])
ax[1].plot(xs_subset, richnesses, '.-', alpha=0.5)
ax[1].set_ylabel("Strain richness")
ax[1].set_xticks(xs)
ax[1].set_xticklabels(tps_ordered)
ax[1].text(-0.165, 0.92, 'B', size=20, transform=ax[1].transAxes, weight='bold')
plt.show()
fig.savefig('%s/backhed_strain_richness.pdf' % config.analysis_directory)
# Version 2
tps_ordered = ['Mother', 'Birth', 'Month 4', 'Month 12']
tp_x_dict = {'Mother': 0, 'Birth': 1, 'Month 4': 2, 'Month 12': 3}
xs = np.arange(len(tps_ordered))
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
for subject in subject_tp_strain_richness:
richnesses = []
xs_subset = []
for tp in tps_ordered:
if tp in subject_tp_strain_richness[subject]:
richnesses.append(subject_tp_strain_richness[subject][tp])
xs_subset.append(tp_x_dict[tp])
ax[0].plot(xs_subset, richnesses, '.-', alpha=0.5)
ax[0].set_ylabel("Strain richness")
ax[0].set_xticks(xs)
ax[0].set_xticklabels(tps_ordered)
ax[0].text(-0.165, 0.92, 'A', size=20, transform=ax[0].transAxes, weight='bold')
# E. coli boxplot
tps_ordered = ['Mother', 'Birth', 'Month 4', 'Month 12']
xs = np.arange(len(tps_ordered))
species = 'Escherichia_coli_58110'
tp_count_dict = defaultdict(list)
for subject in subject_tp_strain_count:
for tp in tps_ordered:
if tp in subject_tp_strain_count[subject][species]:
tp_count_dict[tp].append(subject_tp_strain_count[subject][species][tp])
else:
tp_count_dict[tp].append(0)
counts = [tp_count_dict[tp] for tp in tps_ordered]
for tp in tps_ordered:
print(tp)
print(len(tp_count_dict[tp]))
ax[1].boxplot(counts)
ax[1].text(-0.165, 0.92, 'B', size=20, transform=ax[1].transAxes, weight='bold')
ax[1].set_ylabel("Strain count\n" + r'($\it{Escherichia}$ $\it{coli}$)')
ax[1].set_yticks([0,1,2,3,4])
ax[1].set_xticks(xs + 1)
ax[1].set_xticklabels(tps_ordered)
plt.show()
fig.savefig('%s/backhed_strain_richness_v2.pdf' % config.analysis_directory)
# Idea: have thickness of line be proportionate to number of instances
tps_ordered = ['Mother', 'Birth', 'Month 4', 'Month 12']
xs = np.arange(len(tps_ordered))
species = 'Escherichia_coli_58110'
fig, ax = plt.subplots()
counts_count_dict = defaultdict(int)
for subject in subject_tp_strain_count:
counts = []
for tp in tps_ordered:
if tp in subject_tp_strain_count[subject][species]:
counts.append(subject_tp_strain_count[subject][species][tp])
else:
counts.append(0)
counts_count_dict[tuple(counts)] += 1
for counts in counts_count_dict:
ax.plot(xs, counts, '.-', linewidth=counts_count_dict[counts], alpha=0.5)
ax.set_ylabel("Strain count")
ax.set_yticks([0,1,2,3,4])
ax.set_xticks(xs)
ax.set_xticklabels(tps_ordered)
plt.show()
# E. coli boxplot
tps_ordered = ['Mother', 'Birth', 'Month 4', 'Month 12']
xs = np.arange(len(tps_ordered))
species = 'Escherichia_coli_58110'
fig, ax = plt.subplots()
tp_count_dict = defaultdict(list)
for subject in subject_tp_strain_count:
for tp in tps_ordered:
if tp in subject_tp_strain_count[subject][species]:
tp_count_dict[tp].append(subject_tp_strain_count[subject][species][tp])
else:
tp_count_dict[tp].append(0)
counts = [tp_count_dict[tp] for tp in tps_ordered]
means = [np.mean(tp_count_dict[tp]) for tp in tps_ordered]
for tp in tps_ordered:
print(tp)
print(len(tp_count_dict[tp]))
print(np.mean(tp_count_dict[tp]))
ax.boxplot(counts)
ax.plot(xs + 1, means, '.-', markersize=10)
ax.set_ylabel("Strain count")
ax.set_yticks([0,1,2,3,4])
ax.set_xticks(xs + 1)
ax.set_xticklabels(tps_ordered)
plt.show()
# Load pickles
import pickle
# Pickle directory
pickle_dir = "%s/pickles" % config.data_directory
sample_species_polymorphism_dict = pickle.load(open("%s/sample_species_polymorphism_dict.pkl" % (pickle_dir), 'rb'))
# How does strain number relate to polymorphism rate?
species = 'Escherichia_coli_58110'
polymorphisms = []
strain_counts = []
for sample in sample_species_strain_count:
if species in sample_species_polymorphism_dict[sample] and species in sample_species_strain_count[sample]:
polymorphism = sample_species_polymorphism_dict[sample][species]
strain_count = sample_species_strain_count[sample][species]
polymorphisms.append(polymorphism)
strain_counts.append(strain_count)
plt.plot(polymorphisms, strain_counts, '.')
plt.show()
backhed_tp_day_dict = {'Birth': 3, 'Mother': 2, 'Month 4': 122, 'Month 12': 366}
# Generate table for processing with R, specifically
# Fit linear model:
# Num strains E. coli ~ time + host (random effect)
species = 'Escherichia_coli_58110'
output_file = open('%s/%s_strain_data.csv' % (config.analysis_directory, species), 'wb')
output_file.write(','.join(['num_strains', 'time', 'host']) + '\n')
for subject in subject_tp_strain_count:
    for tp in tps_ordered:
        day = backhed_tp_day_dict[tp]  # look up the day for this timepoint before branching, so it is always defined
        if tp in subject_tp_strain_count[subject][species]:
            output_file.write(','.join([str(v) for v in [subject_tp_strain_count[subject][species][tp], day, subject]]) + '\n')
        else:
            output_file.write(','.join([str(v) for v in [0, day, subject]]) + '\n')
output_file.close()
# Find most prevalent species among infants
species_nonzero_count = defaultdict(int)
species_relabs = defaultdict(list)
for sample in infant_samples:
for species in relab_dict[sample]:
species_relabs[species].append(relab_dict[sample][species])
if relab_dict[sample][species] > 0:
species_nonzero_count[species] += 1
species_infant_prev_ordered = []
for species, count in sorted(species_nonzero_count.items(), key=lambda x: x[1], reverse=True):
species_infant_prev_ordered.append(species)
species_avg_relabs = {species: np.mean(species_relabs[species]) for species in species_relabs}
species_infant_relab_ordered = []
for species, avg_relab in sorted(species_avg_relabs.items(), key=lambda x: x[1], reverse=True):
species_infant_relab_ordered.append(species)
# Idea: have thickness of line be proportionate to number of instances
tps_ordered = ['Mother', 'Birth', 'Month 4', 'Month 12']
xs = np.arange(len(tps_ordered))
idx_matrix = np.transpose(np.meshgrid(np.arange(4), np.arange(4)))
idx_pairs = []
for row in idx_matrix:
idx_pairs += list(row)
fig, ax = plt.subplots(4, 4, figsize=(15, 10), sharey=True, sharex=True)
for species, idx_pair in zip(species_infant_prev_ordered[:16], idx_pairs):
i, j = idx_pair
counts_count_dict = defaultdict(int)
for subject in subject_tp_strain_count:
counts = []
for tp in tps_ordered:
if tp in subject_tp_strain_count[subject][species]:
counts.append(subject_tp_strain_count[subject][species][tp])
else:
counts.append(0)
counts_count_dict[tuple(counts)] += 1
for counts in counts_count_dict:
ax[i][j].plot(xs, counts, '.-', linewidth=counts_count_dict[counts], alpha=0.5)
ax[i][j].set_title(species)
ax[i][0].set_ylabel("Strain count")
ax[0][0].set_yticks([0,1,2,3,4])
ax[0][0].set_xticks(xs)
ax[0][0].set_xticklabels(tps_ordered)
plt.subplots_adjust(wspace=0)
plt.tight_layout()
plt.show()
fig.savefig('%s/backhed_strain_count_by_species.pdf' % config.analysis_directory)
```
```
# Standard ML Libraries
import pandas as pd
import torch
import transformers
from torch.utils.data import Dataset, DataLoader
from transformers import DistilBertModel, DistilBertTokenizer
# Use Cuda
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Load training data
train_df = pd.read_csv('data/train.csv')
test_df = pd.read_csv('data/test.csv')
print(train_df.head())
print(test_df.head())
# Triage Dataset Class
class Triage(Dataset):
def __init__(self, dataframe, tokenizer, max_length):
self.len = len(dataframe)
self.data = dataframe
self.tokenizer = tokenizer
self.max_length = max_length
def __getitem__(self, index):
inputs = self.tokenizer.encode_plus(
self.data.title[index],
None,
add_special_tokens = True,
max_length = self.max_length,
pad_to_max_length = True,
return_token_type_ids = True,
truncation = True
)
return {
'ids': torch.tensor(inputs['input_ids'], dtype=torch.long),
'mask': torch.tensor(inputs['attention_mask'], dtype=torch.long),
'targets': torch.tensor(self.data.labels[index], dtype=torch.long)
}
def __len__(self):
return self.len
# Create dataset and dataloader
MAX_LEN = 512
TRAIN_BATCH_SIZE = 4
VALID_BATCH_SIZE = 2
EPOCHS = 1
LEARNING_RATE = 1e-05
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')  # use the uncased tokenizer to match the distilbert-base-uncased weights loaded below
training_set = Triage(train_df, tokenizer, MAX_LEN)
testing_set = Triage(test_df, tokenizer, MAX_LEN)
train_params = { 'batch_size': TRAIN_BATCH_SIZE, 'shuffle': True, 'num_workers': 0 }
test_params = { 'batch_size': VALID_BATCH_SIZE, 'shuffle': True, 'num_workers': 0 }
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)
class DistillBERTClass(torch.nn.Module):
def __init__(self):
super(DistillBERTClass, self).__init__()
self.l1 = DistilBertModel.from_pretrained("distilbert-base-uncased")
self.pre_classifier = torch.nn.Linear(768, 768)
self.dropout = torch.nn.Dropout(0.3)
self.classifier = torch.nn.Linear(768, 4)
def forward(self, input_ids, attention_mask):
output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)
hidden_state = output_1[0]
pooler = hidden_state[:,0]
pooler = self.pre_classifier(pooler)
pooler = torch.nn.ReLU()(pooler)
pooler = self.dropout(pooler)
output = self.classifier(pooler)
return output
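# Classification head: the hidden state of the first token ([CLS]-style pooling) is passed through
# Linear(768 -> 768), ReLU, Dropout(0.3) and Linear(768 -> 4), one logit per target class.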
model = DistillBERTClass()
model.to(device)
# Create the loss function and optimizer
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(params=model.parameters(), lr=LEARNING_RATE)
# Function to calculate the accuracy of the model
def calculate_accuracy(big_idx, targets):
n_correct = (big_idx == targets).sum().item()
return n_correct
# Define the training function
def train(epoch):
tr_loss = 0
n_correct = 0
nb_tr_steps = 0
nb_tr_examples = 0
model.train()
for _,data in enumerate(training_loader,0):
ids = data['ids'].to(device, dtype=torch.long)
mask = data['mask'].to(device, dtype=torch.long)
targets = data['targets'].to(device, dtype=torch.long)
outputs = model(ids, mask)
loss = loss_function(outputs, targets)
tr_loss += loss.item()
big_val, big_idx = torch.max(outputs.data, dim=1)
n_correct += calculate_accuracy(big_idx, targets)
nb_tr_steps += 1
nb_tr_examples += targets.size(0)
        if _ % 1000 == 0:
            loss_step = tr_loss / nb_tr_steps
            accu_step = (n_correct * 100) / nb_tr_examples
            print(f"Training Loss per 1000 steps: {loss_step}")
            print(f"Training Accuracy per 1000 steps: {accu_step}")
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(f"The Total accuracy for epoch {epoch}: {(n_correct * 100)/nb_tr_examples}")
epoch_loss = tr_loss / nb_tr_steps
epoch_accu = (n_correct * 100) / nb_tr_examples
print(f"Training Loss Epoch: {epoch_loss}")
print(f"Training Accuracy Epoch: {epoch_accu}\n\n")
return
for epoch in range(EPOCHS):
train(epoch)
# tr_loss is local to train() and is not defined at notebook scope
def valid(model, testing_loader):
tr_loss = 0
n_correct = 0
nb_tr_steps = 0
nb_tr_examples = 0
model.eval()
n_correct = 0; n_wrong = 0; total = 0
with torch.no_grad():
for _, data in enumerate(testing_loader, 0):
ids = data['ids'].to(device, dtype = torch.long)
mask = data['mask'].to(device, dtype = torch.long)
targets = data['targets'].to(device, dtype = torch.long)
outputs = model(ids, mask).squeeze()
loss = loss_function(outputs, targets)
tr_loss += loss.item()
big_val, big_idx = torch.max(outputs.data, dim=1)
n_correct += calculate_accuracy(big_idx, targets)
nb_tr_steps += 1
nb_tr_examples+=targets.size(0)
            if _ % 5000 == 0:
                loss_step = tr_loss/nb_tr_steps
                accu_step = (n_correct*100)/nb_tr_examples
                print(f"Validation Loss per 5000 steps: {loss_step}")
                print(f"Validation Accuracy per 5000 steps: {accu_step}")
epoch_loss = tr_loss/nb_tr_steps
epoch_accu = (n_correct*100)/nb_tr_examples
print(f"Validation Loss Epoch: {epoch_loss}")
print(f"Validation Accuracy Epoch: {epoch_accu}")
return epoch_accu
print('This is the validation section to print the accuracy and see how it performs')
print('Here we are leveraging the dataloader created for the validation dataset; the approach uses more of PyTorch')
acc = valid(model, testing_loader)
print("Accuracy on test data = %0.2f%%" % acc)
```
```
#Author : Meetkumar Patel
#Research Paper : https://www.academia.edu/38463296/Toxic_Comment_Classification_Using_Neural_Networks_and_Machine_Learning
import os
import re
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import RNN, GRU, LSTM, Dense, Input, Embedding, Dropout, Activation, concatenate
from keras.layers import Bidirectional, GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import initializers, regularizers, constraints, optimizers, layers
train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
embedding_file = 'glove.6B.300d.txt'
train_data.describe()
test_data.describe()
train_data.head()
test_data.head()
train_data.isnull().any()
test_data.isnull().any()
# Further action on the columns is not required because no column has any missing data.
classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
y = train_data[classes].values
train_sentences = train_data["comment_text"].fillna("fillna").str.lower()
test_sentences = test_data["comment_text"].fillna("fillna").str.lower()
max_features = 100000
max_len = 150
embed_size = 300
tokenizer = Tokenizer(max_features)
tokenizer.fit_on_texts(list(train_sentences))
tokenized_train_sentences = tokenizer.texts_to_sequences(train_sentences)
tokenized_test_sentences = tokenizer.texts_to_sequences(test_sentences)
train_sentences[1]
tokenized_train_sentences[1]
train_sentences[5]
tokenized_train_sentences[5]
train_padding = pad_sequences(tokenized_train_sentences, max_len)
test_padding = pad_sequences(tokenized_test_sentences, max_len)
def get_coefs(word, *arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.rstrip().rsplit(' ')) for o in open(embedding_file))
word_index = tokenizer.word_index
nb_words = min(max_features, len(word_index))
embedding_matrix = np.zeros((nb_words, embed_size))
for word, i in word_index.items():
if i >= max_features:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
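# Note: the indexing above assumes the fitted vocabulary has at least max_features words
# (so nb_words == max_features); with a smaller vocabulary the largest indices in word_index
# would fall outside embedding_matrix.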
image_input = Input(shape=(max_len, ))
X = Embedding(max_features, embed_size, weights=[embedding_matrix])(image_input)
X = Bidirectional(GRU(64, return_sequences=True, dropout=0.2, recurrent_dropout=0.2))(X)
# Dropout and R-Dropout sequence, inspired by Deep Learning with Python - Francois Chollet
avg_pl = GlobalAveragePooling1D()(X)
max_pl = GlobalMaxPooling1D()(X)
conc = concatenate([avg_pl, max_pl])
X = Dense(6, activation="sigmoid")(conc)
model = Model(inputs=image_input, outputs=X)
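# Architecture: GloVe-initialised embedding -> bidirectional GRU(64, return_sequences=True)
# -> concatenated global average and max pooling -> Dense(6, sigmoid), one output per toxicity label.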
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
saved_model = "weights_base.best.hdf5"
checkpoint = ModelCheckpoint(saved_model, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
early = EarlyStopping(monitor="val_acc", mode="max", patience=5)
callbacks_list = [checkpoint, early]
batch_sz = 32
epoch = 2
model.fit(train_padding, y, batch_size=batch_sz, epochs=epoch, validation_split=0.1, callbacks=callbacks_list)
test_values = model.predict([test_padding], batch_size=1024, verbose=1)
sample_submission = pd.read_csv('sample_submission.csv')
sample_submission[classes] = test_values
sample_submission.to_csv('submission.csv', index=False)
```
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
# Create images with random rectangles and bounding boxes.
num_imgs = 50000
img_size = 8
min_rect_size = 1
max_rect_size = 4
num_objects = 2
bboxes = np.zeros((num_imgs, num_objects, 4))
imgs = np.zeros((num_imgs, img_size, img_size))
for i_img in range(num_imgs):
for i_object in range(num_objects):
w, h = np.random.randint(min_rect_size, max_rect_size, size=2)
x = np.random.randint(0, img_size - w)
y = np.random.randint(0, img_size - h)
imgs[i_img, x:x+w, y:y+h] = 1.
bboxes[i_img, i_object] = [x, y, w, h]
imgs.shape, bboxes.shape
import random
i = random.randint(1,1000)
plt.imshow(imgs[i].T, cmap='Greys', interpolation='none', origin='lower', extent=[0, img_size, 0, img_size])
for bbox in bboxes[i]:
plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))
# Reshape and normalize the data to mean 0 and std 1.
X = (imgs.reshape(num_imgs, -1) - np.mean(imgs)) / np.std(imgs)
X.shape, np.mean(X), np.std(X)
# Normalize x, y, w, h by img_size, so that all values are between 0 and 1.
# Important: Do not shift to negative values (e.g. by setting to mean 0), because the IOU calculation needs positive w and h.
y = bboxes.reshape(num_imgs, -1) / img_size
y.shape, np.mean(y), np.std(y)
# Split training and test.
i = int(0.8 * num_imgs)
train_X = X[:i]
test_X = X[i:]
train_y = y[:i]
test_y = y[i:]
test_imgs = imgs[i:]
test_bboxes = bboxes[i:]
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Convolution2D, MaxPooling2D, Flatten
from keras.optimizers import SGD
filter_size = 3
pool_size = 2
model = Sequential([
Dense(512, input_dim=X.shape[-1]),
Activation('relu'),
Dense(128, input_dim=X.shape[-1]),
Activation('relu'),
Dropout(0.2),
Dense(y.shape[-1])
])
model.compile('adadelta', 'mse')
def IOU(bbox1, bbox2):
    '''Calculate overlap between two bounding boxes [x, y, w, h] as the area of intersection over the area of union'''
x1, y1, w1, h1 = bbox1[0], bbox1[1], bbox1[2], bbox1[3]
x2, y2, w2, h2 = bbox2[0], bbox2[1], bbox2[2], bbox2[3]
w_I = min(x1 + w1, x2 + w2) - max(x1, x2)
h_I = min(y1 + h1, y2 + h2) - max(y1, y2)
if w_I <= 0 or h_I <= 0: # no overlap
return 0
I = w_I * h_I
U = w1 * h1 + w2 * h2 - I
return I / U
def distance(bbox1, bbox2):
return np.sqrt(np.sum(np.square(bbox1[:2] - bbox2[:2])))
num_epochs = 50
flipped_train_y = np.array(train_y)
flipped = np.zeros((len(flipped_train_y), num_epochs))
ious_epoch = np.zeros((len(flipped_train_y), num_epochs))
dists_epoch = np.zeros((len(flipped_train_y), num_epochs))
mses_epoch = np.zeros((len(flipped_train_y), num_epochs))
for epoch in range(num_epochs):
print('Epoch', epoch)
model.fit(train_X, flipped_train_y, epochs=1, validation_data=(test_X, test_y), verbose=2)
pred_y = model.predict(train_X)
for i, (pred_bboxes, exp_bboxes) in enumerate(zip(pred_y, flipped_train_y)):
flipped_exp_bboxes = np.concatenate([exp_bboxes[4:], exp_bboxes[:4]])
mse = np.mean(np.square(pred_bboxes - exp_bboxes))
mse_flipped = np.mean(np.square(pred_bboxes - flipped_exp_bboxes))
iou = IOU(pred_bboxes[:4], exp_bboxes[:4]) + IOU(pred_bboxes[4:], exp_bboxes[4:])
iou_flipped = IOU(pred_bboxes[:4], flipped_exp_bboxes[:4]) + IOU(pred_bboxes[4:], flipped_exp_bboxes[4:])
dist = distance(pred_bboxes[:4], exp_bboxes[:4]) + distance(pred_bboxes[4:], exp_bboxes[4:])
dist_flipped = distance(pred_bboxes[:4], flipped_exp_bboxes[:4]) + distance(pred_bboxes[4:], flipped_exp_bboxes[4:])
if mse_flipped < mse: # you can also use iou or dist here
flipped_train_y[i] = flipped_exp_bboxes
flipped[i, epoch] = 1
mses_epoch[i, epoch] = mse_flipped / 2.
ious_epoch[i, epoch] = iou_flipped / 2.
dists_epoch[i, epoch] = dist_flipped / 2.
else:
mses_epoch[i, epoch] = mse / 2.
ious_epoch[i, epoch] = iou / 2.
dists_epoch[i, epoch] = dist / 2.
print('Flipped {} training samples ({} %)'.format(np.sum(flipped[:, epoch]), np.mean(flipped[:, epoch]) * 100.))
print('Mean IOU: {}'.format(np.mean(ious_epoch[:, epoch])))
print('Mean dist: {}'.format(np.mean(dists_epoch[:, epoch])))
print('Mean mse: {}'.format(np.mean(mses_epoch[:, epoch])))
print('\n')
plt.pcolor(flipped[:1000], cmap='Greys')
plt.xlabel('Epoch')
plt.ylabel('Training sample')
plt.plot(np.mean(ious_epoch, axis=0), label='Mean IOU') # between predicted and assigned true bboxes
plt.plot(np.mean(dists_epoch, axis=0), label='Mean distance') # relative to image size
plt.legend()
plt.ylim(0, 1)
pred_y = model.predict(test_X)
pred_y = pred_y.reshape(len(pred_y), num_objects, -1)
pred_bboxes = pred_y[..., :4] * img_size
pred_shapes = pred_y[..., 4:5]
pred_bboxes.shape, pred_shapes.shape
plt.figure(figsize=(16, 8))
for i_subplot in range(1, 5):
plt.subplot(1, 4, i_subplot)
i = np.random.randint(len(test_X))
plt.imshow(test_imgs[i].T, cmap='Greys', interpolation='none', origin='lower', extent=[0, img_size, 0, img_size])
for pred_bbox, exp_bbox, pred_shape in zip(pred_bboxes[i], test_bboxes[i], pred_shapes[i]):
print(pred_bbox)
plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2], pred_bbox[3],ec='r', fc='none'))
```
# Quiz - Week 4A
## Q1.
* * *
* Here is a table of 1-5 star ratings for five movies (M, N, P, Q, R) by three raters (A, B, C).
<pre>
M N P Q R
A 1 2 3 4 5
B 2 3 2 5 3
C 5 5 5 3 2
</pre>
* Normalize the ratings by subtracting the average for each row and then subtracting the average for each column in the resulting table. Then, identify the true statement about the normalized table.
## Solution 1.
Step 1. Mean Calculation for row
<pre>
M N P Q R Mean
A 1 2 3 4 5 3
B 2 3 2 5 3 3
C 5 5 5 3 2 4
</pre>
Step 2. Mean Subtraction for row
<pre>
M N P Q R Mean
A -2 -1 0 1 2 3
B -1 0 -1 2 0 3
C 1 1 1 -1 -2 4
</pre>
Step 3. Mean Calculation for col
<pre>
      M     N     P     Q     R    Mean
A    -2    -1     0     1     2     3
B    -1     0    -1     2     0     3
C     1     1     1    -1    -2     4
Mean -2/3   0     0    2/3    0
</pre>
Step 4. Mean Subtraction for col
<pre>
       M     N     P     Q     R    Mean
A    -4/3   -1     0    1/3    2     3
B    -1/3    0    -1    4/3    0     3
C     5/3    1     1   -5/3   -2     4
Mean -2/3    0     0    2/3    0
</pre>
```
import numpy as np
print np.mean(np.array([1,2,3,4,5]))
print np.mean(np.array([2,3,2,5,3]))
print np.mean(np.array([5,5,5,3,2]))
print "=============="
a = np.array([-2,-1, 1])
# print np.mean(a)
print a - np.mean(a)
a = np.array([-1, 0, 1])
# print np.mean(a)
print a - np.mean(a)
a = np.array([ 0,-1, 1])
# print np.mean(a)
print a - np.mean(a)
a = np.array([ 1, 2,-1])
# print np.mean(a)
print a - np.mean(a)
a = np.array([ 2, 0,-2])
# print np.mean(a)
print a - np.mean(a)
```
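The same two-step normalization can be written more compactly with numpy broadcasting; this sketch simply double-checks the hand calculation above:
```
import numpy as np

ratings = np.array([[1, 2, 3, 4, 5],
                    [2, 3, 2, 5, 3],
                    [5, 5, 5, 3, 2]], dtype=float)

# subtract each row's mean, then each column's mean of the result
row_centered = ratings - ratings.mean(axis=1, keepdims=True)
normalized = row_centered - row_centered.mean(axis=0, keepdims=True)
print(normalized)
```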
## Q2.
* * *
* Below is a table giving the profile of three items.
<pre>
A 1 0 1 0 1 2
B 1 1 0 0 1 6
C 0 1 0 1 0 2
</pre>
* The first five attributes are Boolean, and the last is an integer "rating." Assume that the scale factor for the rating is α. Compute, as a function of α, the cosine distances between each pair of profiles. For each of α = 0, 0.5, 1, and 2, determine the cosine of the angle between each pair of vectors. Which of the following is FALSE?
```
from numpy.linalg import norm
# cos(A, B)
alphas = [0, 0.5, 1, 2]
for alpha in alphas:
print "=========================="
A = np.array([1,0,1,0,1,2*alpha], dtype="float32")
B = np.array([1,1,0,0,1,6*alpha], dtype="float32")
C = np.array([0,1,0,1,0,2*alpha], dtype="float32")
print "Current Alpha: " + str(alpha)
cos_a_b = float(np.dot(A.T, B)) / float( norm(A, ord=2)* norm(B, ord=2))
print "Consine(A, B) = " + str(cos_a_b)
cos_a_c = float(np.dot(A.T, C)) / float( norm(A, ord=2)* norm(C, ord=2))
print "Consine(A, C) = " + str(cos_a_c)
cos_b_c = float(np.dot(B.T, C)) / float( norm(B, ord=2)* norm(C, ord=2))
print "Consine(B, C) = " + str(cos_b_c)
```
# Quiz - Week 4B
# Q1.
***
Note: In this question, all columns will be written in their transposed form, as rows, to make the typography simpler. Matrix M has three rows and two columns, and the columns form an orthonormal basis. One of the columns is [2/7,3/7,6/7]. There are many options for the second column [x,y,z]. Write down those constraints on x, y, and z. Then, identify in the list below the one column that could be [x,y,z]. All components are computed to three decimal places, so the constraints may be satisfied only to a close approximation.
```
import numpy as np
def test_orth(lst, tested_lst):
col_b = np.array(tested_lst)
col_a = np.array(lst)
return (np.dot(col_b.T, col_a), np.linalg.norm(col_b, ord=2) )
a = [2/7.0, 3/7.0, 6/7.0]
print test_orth(a, [-.702, .117, .702])
print test_orth(a, [-.288, -.490, .772])
print test_orth(a, [.728, .485, -.485])
print test_orth(a, [2.250, -.500, -.750])
```
## Q2.
***
Note: In this question, all columns will be written in their transposed form, as rows, to make the typography simpler. Matrix M has three rows and three columns, and the columns form an orthonormal basis. One of the columns is [2/7,3/7,6/7], and another is [6/7, 2/7, -3/7]. Let the third column be [x,y,z]. Since the length of the vector [x,y,z] must be 1, there is a constraint that $x^2+y^2+z^2$ = 1. However, there are other constraints, and these other constraints can be used to deduce facts about the ratios among x, y, and z. Compute these ratios, and then identify one of them in the list below.
The orthogonality constraints with the two given columns are

$$2x + 3y + 6z = 0 \qquad\mbox{and}\qquad 6x + 2y - 3z = 0$$

Adding twice the second equation to the first eliminates $z$: $14x + 7y = 0$, so $y = -2x$. Substituting $y = -2x$ into the first equation gives $-4x + 6z = 0$, so $z = \tfrac{2}{3}x$.
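As a quick check, the same ratios can be recovered symbolically. This is only an illustrative sketch using sympy (it is not part of the original quiz solution):
```
import sympy

x, y, z = sympy.symbols('x y z')
# solve the two orthogonality constraints for y and z in terms of x
solution = sympy.solve([2*x + 3*y + 6*z, 6*x + 2*y - 3*z], [y, z])
print(solution)   # expected: {y: -2*x, z: 2*x/3}
```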
## Q3.
***
Suppose we have three points in a two dimensional space: (1,1), (2,2), and (3,4). We want to perform PCA on these points, so we construct a 2-by-2 matrix whose eigenvectors are the directions that best represent these three points. Construct this matrix and identify, in the list below, one of its elements.
```
M = np.array([[1,1],
[2,2],
[3,4]])
print np.dot(M.T, M)
```
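To see the principal directions themselves, one can diagonalize this matrix. This is only an illustrative sketch; the quiz itself asks only for the matrix entries:
```
import numpy as np

M = np.array([[1, 1],
              [2, 2],
              [3, 4]])
cov = np.dot(M.T, M)
# the columns of eigvecs are the directions that best represent the points
eigvals, eigvecs = np.linalg.eig(cov)
print(eigvals)
print(eigvecs)
```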
## Q4
***
Find, in the list below, the vector that is orthogonal to the vector [1,2,3]. Note: the interesting concept regarding eigenvectors is "orthonormal," that is unit vectors that are orthogonal. However, this question avoids using unit vectors to make the calculations simpler.
```
def test_orth(lst, tested_lst):
flag = True
col_b = np.array(tested_lst)
col_a = np.array(lst)
return np.dot(col_b.T, col_a)
a = [1, 2, 3]
print test_orth(a, [-1, -2, 0])
print test_orth(a, [-1, -2, -3])
print test_orth(a, [-4, 2, -1])
print test_orth(a, [-1, -1, 1])
print "================="
print test_orth(a, [-1,1,-1])
print test_orth(a, [2,-3, 1])
print test_orth(a, [-4,2,-1])
print test_orth(a, [ 3,0,-1])
```
# Ellipsometer Basics
**Scott Prahl**
*April 2020*
This notebook reviews the basic equations used in ellipsometry. It also demonstrates how the Fresnel reflection equations are related to the ellipsometry parameter $\rho = \tan\psi \cdot e^{j\Delta}$.
References
* Archer, *Manual on Ellipsometry* 1968.
* Azzam, *Ellipsometry and Polarized Light*, 1977.
* Fujiwara, *Spectroscopic Ellipsometry*, 2007.
* Tompkins, *A User's Guide to Ellipsometry*, 1993
* Tompkins, *Handbook of Ellipsometry*, 2005.
* Woollam, *A short course in ellipsometry*, 2001.
```
import sympy
import numpy as np
import matplotlib.pyplot as plt
import pypolar.fresnel as fresnel
import pypolar.ellipsometry as ellipse
sympy.init_printing(use_unicode=True)
```
## Ellipsometry
### Layout
A basic ellipsometer configuration is shown below, where the light hits the sample at an angle $\theta$ from the normal to the surface. The incident electric field $E_{ip}$ is parallel to the plane of incidence (which contains the incoming propagation vector and the vector normal to the surface). The electric field $E_{is}$ is perpendicular to the plane of incidence.
<img src="http://omlc.org/~prahl/host/453/week5/ellipsometry.png" width="100%">
Usually, the incident light $\mathbf{E}_i$ is linearly polarized but does not need to be. The reflected light $\mathbf{E}_r$ is, in general, elliptically polarized.
$$
\mathbf{E}_i =
\begin{bmatrix}
E_{ip}e^{j\delta_{ip}}\\
E_{is}e^{j\delta_{is}}\\
\end{bmatrix}
\qquad\mbox{and}\qquad
\mathbf{E}_r =
\begin{bmatrix}
E_{rp}e^{j\delta_{rp}}\\
E_{rs}e^{j\delta_{rs}}\\
\end{bmatrix}
$$
### $\Delta$ and $\tan\psi$
The effect of reflection is characterized by the angle $\Delta$, defined as the change in phase, and the angle $\psi$, the arctangent of the factor by which the amplitude ratio changes.
$$
\Delta = (\delta_{rp} - \delta_{rs}) - (\delta_{ip}-\delta_{is})
$$
and
$$
\tan\psi = \frac{E_{is}}{E_{ip}} \cdot \frac{E_{rp}}{E_{rs}}
$$
In the special (but common) case of a smooth surface, there is no mixing of parallel and perpendicular light, i.e.,
$$
\begin{bmatrix}
E_{rp}e^{j\delta_{rp}}\\
E_{rs}e^{j\delta_{rs}}
\end{bmatrix}
=
\begin{bmatrix}
r_p & 0 \\
0 & r_s
\end{bmatrix}
\begin{bmatrix}
E_{ip}e^{j\delta_{ip}}\\
E_{is}e^{j\delta_{is}}
\end{bmatrix}
=
\begin{bmatrix}
r_p E_{ip}e^{j\delta_{ip}}\\
r_s E_{is}e^{j\delta_{is}}\\
\end{bmatrix}
$$
the $\tan\psi$ equation simplifies to
$$
\tan\psi = \frac{r_p}{r_s}
$$
This overall change in the field is often written as a single complex number $\rho$
$$
\rho = \tan\psi e^{j\Delta}
$$
Ellipsometry is the science of measuring and interpreting $\Delta$ and $\psi$ for a surface. A simple use of ellipsometry is to determine the complex index of refraction $m$ for a thick uniform flat substrate. More elaborate ellipsometric techniques allow one to characterize one or more coatings on the substrate.
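For a smooth surface, $\psi$ and $\Delta$ follow directly from the two complex reflection coefficients. The sketch below only illustrates that bookkeeping (it assumes $r_p$ and $r_s$ are already available as complex numbers; the rest of this notebook uses `pypolar` for the same job, and the helper name is arbitrary):
```
import numpy as np

def psi_delta(r_p, r_s):
    """Return (psi, Delta) in radians from the complex reflection coefficients."""
    rho = r_p / r_s
    psi = np.arctan(np.abs(rho))
    Delta = np.angle(rho)
    return psi, Delta
```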
### Refractive index
When light reflects off a surface, the amplitude and phase of the electric field change. These changes depend on the wavelength $\lambda$ of the light, the angle of incidence $\theta$, the complex refractive index of the material $m= n(1-j \kappa)$, and the polarization state of the incident beam:
* The plane of incidence contains the incident electric field propagation vector and the normal to the surface.
* The angle of incidence is the angle between these two directions
* The real part of the refractive index $n$ determines the speed of light in the material
* The imaginary part of the refractive index $\kappa$ determines the light absorption of the material
* Linearly polarized light parallel to the plane of incidence is **p-polarized**.
* Linearly polarized light perpendicular to the plane of incidence is **s-polarized**.
* The phase shift and amplitude change is different for p and s- polarized light.
* For dielectrics like glass, the amount of reflected light is determined by a single number, the index of refraction $n$.
* Semi-conductors and metals have a complex index of refraction $m = n(1 - j \kappa)$.
### Fresnel Reflection
The Fresnel formulas for light incident from a vacuum, at an angle $\theta$ from the normal, onto a flat surface with refractive index $m$ vary with the orientation of the electric field. The plane of incidence is defined as containing both the incident direction and the normal to the surface. If the incident field is parallel to the plane of incidence then
$$
r_p=r_\parallel ={m^2\cos\theta-\sqrt{m^2-\sin^2\theta}\over
m^2\cos\theta+\sqrt{m^2-\sin^2\theta}}
$$
If the incident field is perpendicular to the plane of incidence then
$$
r_s=r_\perp ={\cos\theta-\sqrt{m^2-\sin^2\theta}\over
\cos\theta+\sqrt{m^2-\sin^2\theta}}
$$
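These two formulas can be transcribed directly with numpy. The sketch below is only illustrative (the notebook itself relies on `pypolar`); `theta` is in radians and `m` may be complex:
```
import numpy as np

def r_p(m, theta):
    """Fresnel amplitude coefficient for p-polarized light, vacuum incidence."""
    s = np.sqrt(m**2 - np.sin(theta)**2 + 0j)
    return (m**2 * np.cos(theta) - s) / (m**2 * np.cos(theta) + s)

def r_s(m, theta):
    """Fresnel amplitude coefficient for s-polarized light, vacuum incidence."""
    s = np.sqrt(m**2 - np.sin(theta)**2 + 0j)
    return (np.cos(theta) - s) / (np.cos(theta) + s)
```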
### Fundamental Equation of Ellipsometry
Ellipsometers are used to determine the parameters $\psi$ and $\Delta$ which can be used to calculate $\rho$
$$
\rho = {r_p(\theta)\over r_s(\theta)} = \tan\psi \cdot e^{j \Delta}
$$
The graph below shows how $\psi$ and $\Delta$ vary with the incidence angle
### Determining the complex index of refraction
A convenient formula for an isotropic, uniform sample is given in
[Measurement of the Thickness and Refractive Index of Very Thin Films and the Optical Properties of Surfaces by Ellipsometry](https://archive.org/details/jresv67An4p363) so that when $\rho$ and $\theta$ are known, the complex index of refraction can be calculated.
$$
m = \tan\theta \sqrt{1-{4\rho\sin^2\theta\over (1+\rho)^2}}
$$
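A direct transcription of this inversion formula is sketched below, only to make the algebra explicit (the function name is arbitrary); presumably `ellipse.m_from_rho`, used in the rest of the notebook, does essentially this while also choosing the physically correct branch of the square root:
```
import numpy as np

def m_from_rho_formula(rho, theta):
    """Complex refractive index from rho and the incidence angle theta (radians)."""
    # the + 0j forces the complex square root even if rho happens to be real
    return np.tan(theta) * np.sqrt(1 - 4 * rho * np.sin(theta)**2 / (1 + rho)**2 + 0j)
```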
```
def plot_rho(m):
angles = np.linspace(0.001,90,91)
rho = ellipse.rho_from_m(m,angles, deg=True)
plt.plot(angles, np.abs(rho), color='red')
plt.plot(angles, np.angle(rho),color='blue')
plt.xlabel(r"Incidence angle $\theta_i$ (degrees)")
plt.ylabel(r'$\tan\psi$ or $\Delta$')
plt.title('Refractive Index %.3f%.3fj' % (m.real,m.imag))
plt.xlim(0,90)
plt.text(30, 3, r'$\Delta$', color='blue')
plt.text(30, 0.8, 'tan($\psi$)', color='red')
m = 1.5
plot_rho(m)
plt.show()
m = 1.5-1.0j
plot_rho(m)
plt.show()
m = 3-3j
plot_rho(m)
plt.show()
```
Replicate figures 2.28 and 2.29 from Woollam's Short Course.
```
def plot_rho2(m1,m2):
angles = np.linspace(65,85,181)
rho1 = ellipse.rho_from_m(m1,angles,deg=True)
rho2 = ellipse.rho_from_m(m2,angles,deg=True)
psi1 = np.degrees(np.arctan(np.abs(rho1)))
psi2 = np.degrees(np.arctan(np.abs(rho2)))
Delta1 = np.degrees(np.angle(rho1))
Delta2 = np.degrees(np.angle(rho2))
plt.subplots(2,1,figsize=(8,8))
plt.subplot(2,1,1)
plt.plot(angles, psi1, color='red')
plt.plot(angles, psi2,color='blue')
plt.ylabel(r'$\psi$ (degrees)')
plt.title('%.3f%+.3fj and %.3f%+.3fj' % (m1.real,m1.imag,m2.real,m2.imag))
plt.grid(True)
plt.subplot(2,1,2)
plt.plot(angles, Delta1, color='red')
plt.plot(angles, Delta2,color='blue')
plt.xlabel(r"Incidence angle $\theta_i$ (degrees)")
plt.ylabel(r'$\Delta$ (degrees)')
plt.grid(True)
m1 = 4.516 - 0.249j # amorphous silicon
m2 = 3.875 - 0.023j # crystalline silicon
plot_rho2(m1,m2)
plt.show()
```
## Extracting Refractive Index from $\rho$
First we will calculate $\rho$ for a known complex index of refraction.
```
m = 1.5-1.3j
theta_i = np.linspace(1,89,15)
rho = ellipse.rho_from_m(m, theta_i, deg=True)
plt.plot(theta_i, np.abs(rho), 'o', color='red')
plt.plot(theta_i, np.angle(rho), 'o', color='blue')
plt.xlabel(r"Incidence angle $\theta_i$ (degrees)")
plt.ylabel(r'$\tan\psi$ or $\Delta$')
plt.title('Refractive Index %.3f%.3fj' % (m.real,m.imag))
plt.xlim(0,90)
plt.text(30, 3, r'$\Delta$', color='blue')
plt.text(30, 1.0, 'tan($\psi$)', color='red')
plt.show()
```
Then, we will see if we can recover $m$ using $\rho$ and $\theta_i$.
We will test with incidence angles from 1° to 89°. We avoid 0° and 90° because these angles are either impossible or do not contain sufficient information to invert.
```
m = 1.5-1.3j
theta_i = np.linspace(1,89,15)
rho = ellipse.rho_from_m(m, theta_i, deg=True)
m2 = ellipse.m_from_rho(rho, theta_i, deg=True)
plt.plot(theta_i, m2.real, 'o', color = 'blue')
plt.plot(theta_i, m2.imag, 'o', color='red')
plt.text(theta_i[0], m2[0].real-0.05, r'n', color='blue', va="top")
plt.text(theta_i[0], m2[0].imag+0.1, r'$n \kappa$', color='red')
plt.axhline(m.real,color='blue')
plt.axhline(m.imag,color='red')
plt.xlabel(r"Incidence angle $\theta_i$ (degrees)")
plt.ylabel('Real or Imaginary part of Refractive Index')
plt.title('Refractive Index = %.4f %.4fj'%(m.real,m.imag))
plt.show()
```
Quantitatively show that things work at an incidence angle of 70°
```
theta_i = 70
m = 1.5 - 2.0j
rho = ellipse.rho_from_m(m, theta_i, deg=True)
print('Incidence angle = %.1f°'%(theta_i))
print('rho = %.3f%+.3fj'%(rho.real,rho.imag))
print('Refractive index = %.3f%+.3fj'%(m.real,m.imag))
print()
m2 = ellipse.m_from_rho(rho, theta_i, deg=True)
print('Extracted index = %.3f%+.3fj using rho'%(m2.real,m2.imag))
```