## This notebook will help you train a vanilla Point-Cloud AE with the basic architecture we used in our paper.
(It assumes `latent_3d_points` is on the PYTHONPATH and that the structural losses have been compiled.)
```
#Setup environment to be compatible with colab
!git clone https://github.com/T60D/latent_3d_points.git
%tensorflow_version 1.x
import sys
import tensorflow
print(tensorflow.__version__)
!apt-get install -qq gcc-5 g++-5 -y &> /dev/null
!ln -s /usr/bin/gcc-5 &> /dev/null
!ln -s /usr/bin/g++-5 &> /dev/null
!sudo apt-get update &> /dev/null
!sudo apt-get upgrade &> /dev/null
%cd /content/latent_3d_points/external/structural_losses
!make
#Import drive where the files are stored
from google.colab import drive
drive.mount('/content/drive')
%cd /content/
import os.path as osp
from latent_3d_points.src.ae_templates import mlp_architecture_ala_iclr_18, default_train_params
from latent_3d_points.src.autoencoder import Configuration as Conf
from latent_3d_points.src.point_net_ae import PointNetAutoEncoder
from latent_3d_points.src.in_out import snc_category_to_synth_id, create_dir, PointCloudDataSet, \
load_all_point_clouds_under_folder
from latent_3d_points.src.tf_utils import reset_tf_graph
from latent_3d_points.src.general_utils import plot_3d_point_cloud
%load_ext autoreload
%autoreload 2
%matplotlib inline
```
Define Basic Parameters
```
top_out_dir = '/content/drive/Shareddrives/CS230/' # Use to save Neural-Net check-points etc.
top_in_dir = '/content/drive/Shareddrives/CS230/dev_data' # Top-dir of where point-clouds are stored.
experiment_name = 'single_class_ae'
n_pc_points = 2048 # Number of points per model.
bneck_size = 128 # Bottleneck-AE size
ae_loss = 'chamfer' # Loss to optimize: 'emd' or 'chamfer'
class_name = "plane"
```
Load Point-Clouds
```
#syn_id = snc_category_to_synth_id()[class_name]
#class_dir = osp.join(top_in_dir , syn_id)
all_pc_data = load_all_point_clouds_under_folder("/content/drive/Shareddrives/CS230/dev_data", n_threads=8, file_ending='.ply', verbose=True)
```
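As an optional sanity check (an addition, not part of the original notebook), you can confirm how many clouds were loaded and plot one raw input cloud before training. This assumes the returned `PointCloudDataSet` exposes a `num_examples` attribute and that `next_batch` yields the point clouds as its first element, as it does later in this notebook.
```
# Optional sanity check on the loaded data (num_examples is assumed to exist on PointCloudDataSet).
print('Loaded point clouds:', all_pc_data.num_examples)
sample_pc, sample_names, _ = all_pc_data.next_batch(1)
print('Batch shape:', sample_pc.shape)   # expected: (1, n_pc_points, 3)
plot_3d_point_cloud(sample_pc[0][:, 0],
                    sample_pc[0][:, 1],
                    sample_pc[0][:, 2], in_u_sphere=True);
```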
Load the default training parameters (some of which are listed below). For more details, print the configuration object.
- `'batch_size': 50`
- `'denoising': False` (by default the AE is not denoising)
- `'learning_rate': 0.0005`
- `'z_rotate': False` (whether to randomly rotate the models of each batch)
- `'loss_display_step': 1` (display the loss every this many epochs)
- `'saver_step': 10` (save the neural network every this many epochs)
```
train_params = default_train_params()
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(n_pc_points, bneck_size)
train_dir = create_dir(osp.join(top_out_dir, experiment_name))
conf = Conf(n_input = [n_pc_points, 3],
loss = ae_loss,
training_epochs = train_params['training_epochs'],
batch_size = train_params['batch_size'],
denoising = train_params['denoising'],
learning_rate = train_params['learning_rate'],
train_dir = train_dir,
loss_display_step = train_params['loss_display_step'],
saver_step = train_params['saver_step'],
z_rotate = train_params['z_rotate'],
encoder = encoder,
decoder = decoder,
encoder_args = enc_args,
decoder_args = dec_args
)
conf.experiment_name = experiment_name
conf.held_out_step = 5 # How often to evaluate/print out loss on
# held_out data (if they are provided in ae.train() ).
conf.save(osp.join(train_dir, 'configuration'))
```
If you ran the above lines, you can reload a saved model like this:
```
load_pre_trained_ae = False
restore_epoch = 500
if load_pre_trained_ae:
    conf = Conf.load(train_dir + '/configuration')
    reset_tf_graph()
    ae = PointNetAutoEncoder(conf.experiment_name, conf)
    ae.restore_model(conf.train_dir, epoch=restore_epoch)
```
Build AE Model.
```
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
```
Train the AE (save output to train_stats.txt)
```
buf_size = 1  # Open 'train_stats.txt' line-buffered so each training line is flushed as it is written.
fout = open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size)
train_stats = ae.train(all_pc_data, conf, log_file=fout)
fout.close()
```
Get a batch of reconstructions and their latent codes.
```
feed_pc, feed_model_names, _ = all_pc_data.next_batch(10)
reconstructions = ae.reconstruct(feed_pc)[0]
latent_codes = ae.transform(feed_pc)
```
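As a quick check (an addition, not in the original notebook), the returned arrays are expected to have shape `(batch, n_pc_points, 3)` for the reconstructions and `(batch, bneck_size)` for the latent codes:
```
# Illustrative shape check; assumes both calls return NumPy arrays.
print('reconstructions:', reconstructions.shape)   # expected (10, 2048, 3)
print('latent_codes:', latent_codes.shape)         # expected (10, 128)
```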
Use any plotting mechanism such as matplotlib to visualize the results.
```
i = 2
plot_3d_point_cloud(reconstructions[i][:, 0],
reconstructions[i][:, 1],
reconstructions[i][:, 2], in_u_sphere=True);
i = 4
plot_3d_point_cloud(reconstructions[i][:, 0],
reconstructions[i][:, 1],
reconstructions[i][:, 2], in_u_sphere=True);
```
<a href="https://colab.research.google.com/github/shreyasseshadri/SC-Project/blob/master/model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
%reset
# # Load the Drive helper and mount
from google.colab import drive
# # This will prompt for authorization.
drive.mount('/content/drive')
import keras
import numpy as np
import random as rn
rn.seed(123)
np.random.seed(123)
from keras.models import Model
from keras.layers import Input, Dense,Conv2D ,LSTM,Lambda,Flatten,Dropout
from keras.utils import np_utils
from keras import backend as K
tf_session = K.get_session()
from keras import regularizers
import tensorflow as tf
tf.set_random_seed(123)
def stack_dim(X):
    # Split the conv output along the channel axis and stack the slices along the time axis.
    conv_outputs = tf.unstack(X, axis=3)
    return tf.concat(conv_outputs, axis=1)
def get_shape(input_shape):
    # Output shape of stack_dim: (batch, rows * channels, cols).
    assert len(input_shape) == 4
    return (input_shape[0], input_shape[1] * input_shape[3], input_shape[2])
def model(input_shape, hidden_size, dense_layer1, dense_layer2):
    X_input = Input(input_shape)
    X = Conv2D(32, (5, 5), input_shape=input_shape, activation='relu')(X_input)
    X = Dropout(0.5)(X)
    X = Lambda(stack_dim, output_shape=get_shape)(X)
    X = LSTM(hidden_size, return_sequences=True)(X)
    X = Dense(dense_layer1, activation='relu', kernel_regularizer=regularizers.l2(0.01))(X)
    X = Flatten()(X)
    X = Dropout(0.5)(X)
    X = Dense(dense_layer2, activation='softmax')(X)
    model = Model(inputs=X_input, outputs=X, name='sent_classifier')
    return model
X_train=np.load('drive/My Drive/SC-Project/train_data.npy')
X_test=np.load('drive/My Drive/SC-Project/X_test.npy')
y_train=np.load('drive/My Drive/SC-Project/y_train.npy')
y_test=np.load('drive/My Drive/SC-Project/y_test.npy')
m=model(X_train.shape[1:],128,50,2)
from keras.optimizers import SGD,Adam
# opt=SGD(lr=0.05, momentum=0.01,decay=0.0, nesterov=False)
m.compile(optimizer='sgd',loss='binary_crossentropy',metrics=['accuracy'])
def train(l, h):
    # Build a class-balanced, shuffled set of indices drawn from rows l..h of y_train.
    train_size = h - l
    train_pos = np.where(y_train[l:h][..., 1] == 1)[0] + l
    train_neg = np.where(y_train[l:h][..., 0] == 1)[0] + l
    np.random.seed(123)
    pos = np.random.choice(train_pos, train_size // 2)
    np.random.seed(123)
    neg = np.random.choice(train_neg, train_size - train_size // 2)
    indices = np.append(pos, neg, 0)
    np.random.seed(123)
    np.random.shuffle(indices)
    return indices
train_index=train(0,2210)
val_index=train(4000,8000)
val_index
from sklearn.utils import resample
ups_train_index = resample(train_index,replace=True,n_samples=4000,random_state=123)
len(ups_train_index)
hist=m.fit(X_train[ups_train_index],y_train[ups_train_index],batch_size=64,epochs=20,verbose=1,validation_data=(X_train[val_index],y_train[val_index]))
m.save('drive/My Drive/SC-Project/final.h5')
from keras.models import load_model
m = load_model('drive/My Drive/SC-Project/final.h5',custom_objects={"tf": tf})
pred=m.evaluate(X_test,y_test)
print(pred)
m.summary()
```
```
import copy
import numpy as np
import pandas as pd
from data.dataset import StockDataset
from data.macro import Macro
from data.scaler import HybridScaler
from data.split import StratifiedTimeSeriesSplit
from data.utils import sliding_window
from model.arima import grid_search
from sklearn.model_selection import TimeSeriesSplit
from sklearn.metrics import confusion_matrix
import warnings
warnings.filterwarnings('ignore')
dataset = StockDataset('^GSPC')
df = dataset.get_hist(start_date='1950-01-01', end_date='2021-10-23', time_interval='daily')
x1 = df
x2 = pd.concat(
[dataset.lookback_agg(lookback_len=30),
dataset.lookback_agg(lookback_len=60),
dataset.lookback_agg(lookback_len=120)],
axis=1)
y = dataset.get_change_forecast_label(forecast_len=30, is_up=False, method='past_all')
macro = Macro(token='wixdGr7AAc9_syvt6cFD')
macro_data = macro.get_macro()
ori_cols = [col for col in macro_data.columns if 'lag' not in col]
lag_cols = [col for col in macro_data.columns if 'lag' in col]
# x1 = x1.merge(macro_data[ori_cols], how='left', on='date')
# x2 = x2.merge(macro_data, how='left', on='date')
x2 = x2.merge(macro_data, how='left', on='date')
x2.apply(lambda x: x.first_valid_index()).max()
# TRAIN_START = '1951-01-01'
TRAIN_START = '1988-01-10'
TEST_START = '2016-01-01'
TEST_END = '2019-12-01'
window_len = 60
scaler1 = HybridScaler()
scaler1.fit(x1[TRAIN_START:TEST_START])
scaler2 = HybridScaler()
trans_x2 = scaler2.fit_transform(x2)
indices, windows = sliding_window(scaler1.transform(x1), window_len=window_len, step_size=1)
train_start = (np.array(indices) <= TRAIN_START).sum()
test_start = (np.array(indices) <= TEST_START).sum()
test_end = (np.array(indices) <= TEST_END).sum()
train_x1 = windows[train_start:test_start]
train_x2 = trans_x2.loc[indices][train_start:test_start].values
train_y = y[indices][train_start:test_start].values
test_x1 = windows[test_start:test_end]
test_x2 = trans_x2.loc[indices][test_start:test_end].values
test_y = y[indices][test_start:test_end].values
split = TimeSeriesSplit(n_splits=10, test_size=120)
# split = StratifiedTimeSeriesSplit(n_splits=10, test_size=120, min_positive_ratio=0.25)
```
# Build the two-stage network
```
import torch
from model.double_encoder import DoubleEncoderModel
np.random.seed(1)
torch.manual_seed(1)
model = DoubleEncoderModel(x1_dim=x1.shape[1], x2_dim=x2.shape[1], dropout=0.01)
model.fit(train_x1, train_x2, train_y, max_epoch=200)
pred_y = model.predict(train_x1, train_x2)
pred_y_int = (pred_y >= 0.5).astype(int)
print(pred_y_int.sum(), train_y.sum())
confusion_matrix(train_y, pred_y_int.reshape(-1))
from model.eval import moving_average
import matplotlib.pyplot as plt
test_df = pd.DataFrame({'true': train_y, 'pred': pred_y.reshape(-1)})
ax1 = test_df.plot(figsize=(18, 10), color=['g', 'orange'])
ax1.set_ylabel('bubble')
ax1.axhline(y=0.5, color='r', linestyle='--')
ax2 = ax1.twinx()
ax2.set_ylabel('SP500')
ax2 = x1.loc[indices[train_start:test_start], 'close'].plot()
pred_y = model.predict(test_x1, test_x2)
pred_y_int = (pred_y >= 0.5).astype(int)
print(pred_y_int.sum(), test_y.sum())
confusion_matrix(test_y, pred_y_int.reshape(-1))
from sklearn.metrics import fbeta_score
print(f'Accuracy: {(test_y == pred_y_int.reshape(-1)).mean()}')
print(f'F1:{fbeta_score(test_y, pred_y_int, beta=1)}')
print(f'F2:{fbeta_score(test_y, pred_y_int, beta=2)}')
from model.eval import moving_average
import matplotlib.pyplot as plt
# test_df = moving_average(test_y, pipeline.predict_proba(test_x)[:, 1])
test_df = pd.DataFrame({'true': test_y, 'pred': pred_y.reshape(-1)})
ax1 = test_df.plot(figsize=(18, 10), color=['g', 'orange'])
ax1.set_ylabel('bubble')
ax1.axhline(y=0.5, color='r', linestyle='--')
ax2 = ax1.twinx()
ax2.set_ylabel('SP500')
ax2 = x1.loc[indices[test_start:test_end], 'close'].plot()
```
# Regressor for Market Sales
### Authors: Giacomo Bossi, Emanuele Chioso
## Abstract
The goal of the project is to provide a working forecasting model to optimize the promotions and warehouse stocks of one of the most important European retailers.
## Approach
We started by analysing the dataset we were given, trying to identify correlations or patterns between features. Once the data analysis was complete, we cleaned the data (as explained in the next section).
We then implemented some basic regression algorithms in order to get a first idea of the general performance on the dataset, using the R2 score and MAE as evaluation metrics.
In the end we selected a few of them and ensembled their predictions to obtain the final prediction for the test set.
All testing was performed first via holdout testing, to get a quick result for completely new models, and later with cross-validation, to get a less randomized evaluation.
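The sketch below (an illustration added here, not code from the project) shows the two evaluation schemes on placeholder data, using scikit-learn's `train_test_split`, `cross_val_score`, the R2 score and MAE; the project's real features and target are built later in this notebook.
```
# Illustrative evaluation sketch: quick holdout first, then 10-fold cross-validation.
# X, y and the model are placeholders; the real ones are defined later in this notebook.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.metrics import r2_score, mean_absolute_error

X = np.random.rand(200, 5)
y = np.random.rand(200)
model = LinearRegression()

# Holdout testing: fast, but depends on the particular split.
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=42)
preds = model.fit(X_tr, y_tr).predict(X_te)
print('holdout R2 :', r2_score(y_te, preds))
print('holdout MAE:', mean_absolute_error(y_te, preds))

# Cross-validation: slower, but a less randomized estimate.
cv = KFold(n_splits=10, shuffle=True, random_state=42)
print('CV R2 :', cross_val_score(model, X, y, cv=cv, scoring='r2').mean())
print('CV MAE:', -cross_val_score(model, X, y, cv=cv, scoring='neg_mean_absolute_error').mean())
```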
#### Import all required packages
```
import pandas as pd
import numpy as np
np.set_printoptions(threshold=np.inf)  # show full arrays when printing
from sklearn import linear_model
from sklearn import model_selection
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from sklearn import svm
from sklearn.metrics import r2_score
from sklearn import linear_model
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import StratifiedKFold
from sklearn.decomposition import PCA
from sklearn.feature_selection import f_classif
from datetime import datetime
from scipy.special import boxcox1p, inv_boxcox1p
from scipy import stats
from scipy.stats import norm,skew
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', None)
%matplotlib inline
pd.set_option('display.max_rows', 500)
```
#### Functions defined in Preprocessing
```
def search_log_to_skewed_features(df):
    # Collect numeric, non-binary columns and return those with |skew| > 0.75.
    numeric_feats = []
    for col in df.columns:
        if(len(df[col].unique()) > 2 and df[col].dtype != "object"):
            numeric_feats.append(col)
    # Check the skew of all numerical features
    skewed_feats = df[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
    skewness = pd.DataFrame({'Skew': skewed_feats})
    skewness = skewness[abs(skewness.Skew) > 0.75]
    skewed_features = skewness.index
    return skewed_features
def apply_log_to_skewed_features(df, skewed_features, lambda_log=0.15):
    # Apply the boxcox1p transform to each skewed feature.
    for feat in skewed_features:
        df[feat] = boxcox1p(df[feat], lambda_log)
    print("logged features:", skewed_features)
    return df
def apply_exp_to_result(df, lambda_log=0.15):
    # Invert the boxcox1p transform (used to bring predictions back to the original scale).
    return inv_boxcox1p(df, lambda_log)
def add_date(df):
    # Parse the 'Date' column (dd/mm/yyyy strings) into datetime objects.
    date = np.array([x.split('/') for x in df['Date'].values])
    date = date.astype(np.int32)
    df['Date'] = [datetime(x[2], x[1], x[0]) for x in date]
```
# Data Acquisition
```
dataset = pd.read_csv('../data/original_train.csv')
testset = pd.read_csv('../data/original_test.csv')
dataset=dataset[dataset.IsOpen != 0]
testset=testset[testset.IsOpen != 0]
components = dataset.columns[dataset.columns!='Unnamed: 0']
tcomponents = testset.columns[testset.columns!='Unnamed: 0']
features=set(components).intersection(tcomponents)
wtarget=list(set(components)-set(tcomponents))
target = 'NumberOfSales'
```
# Dealing with NAN
We substituted the missing values in Max_Gust_Speed with the values of Max_Wind. Then, in order to fill the remaining missing values, we grouped the dataset by StoreID and applied a linear interpolation indexed on the time feature. Since the missing values of 'Events' are NMAR, we did not impute them. A minimal sketch of the interpolation idea is shown below; the full per-store code follows in the next cells.
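A minimal sketch on toy data (an illustration, not the project's code): group by StoreID, set the date as index, and interpolate with `method='time'`.
```
# Illustrative: per-store, time-indexed linear interpolation on a toy frame.
import numpy as np
import pandas as pd

toy = pd.DataFrame({
    'StoreID': [1, 1, 1, 2, 2, 2],
    'Date': pd.to_datetime(['2016-03-01', '2016-03-02', '2016-03-03'] * 2),
    'CloudCover': [2.0, np.nan, 6.0, 1.0, np.nan, 3.0],
})
filled = (toy.groupby('StoreID', group_keys=False)
             .apply(lambda g: g.set_index('Date').interpolate(method='time').reset_index()))
print(filled)   # the NaNs are filled per store (4.0 and 2.0 here)
```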
## Dataset
```
add_date(dataset)
for val in dataset['StoreID'].unique():
    df = pd.DataFrame(dataset.loc[dataset['StoreID'] == val])
    df.index = df['Date']
    df['tOpen'] = df['IsOpen'].shift(-1).fillna(method='ffill')
    df['yOpen'] = df['IsOpen'].shift(+1).fillna(method='bfill')
    df['tPromotions'] = df['HasPromotions'].shift(-1).fillna(method='ffill')
    df['yPromotions'] = df['HasPromotions'].shift(+1).fillna(method='bfill')
    df = df.interpolate(method='time', downcast='infer', limit=10)
    dataset.drop(dataset.loc[dataset['StoreID'] == val].index, inplace=True)
    df.index = df['StoreID']
    dataset = pd.concat([dataset, df], ignore_index=True)
dataset['Precipitationmm'] = (np.ceil(dataset.Precipitationmm / 10) * 1).astype(int)
dataset['CloudCover'] = dataset['CloudCover'].fillna(dataset['Precipitationmm'])
dataset['Max_Gust_SpeedKm_h'] = dataset['Max_Gust_SpeedKm_h'].fillna(dataset['Max_Wind_SpeedKm_h'])
#Convert some data to integer
col_to_int = ['Min_VisibilitykM','Max_VisibilityKm','Max_Gust_SpeedKm_h',
'CloudCover','Mean_VisibilityKm','HasPromotions','IsHoliday','HasPromotions']
for col in col_to_int:
    dataset[col] = dataset[col].astype(int)
#Convert some data to int since they are One Hot Encoded
#Add some data about time
dataset['Month'] = pd.DatetimeIndex(dataset['Date']).month
dataset['Daysmonth']= pd.DatetimeIndex(dataset['Date']).day
dataset['Daysweek']= pd.DatetimeIndex(dataset['Date']).dayofweek
dataset['Quarter']= pd.DatetimeIndex(dataset['Date']).quarter
dataset['Year']= pd.DatetimeIndex(dataset['Date']).year
dataset.drop(columns='Date', inplace=True)
dataset.drop(columns='IsOpen', inplace=True)
```
## Testset
```
add_date(testset)
for val in testset['StoreID'].unique():
    print(val, testset.shape)
    df = pd.DataFrame(testset.loc[testset['StoreID'] == val])
    df.index = df['Date']
    df['tOpen'] = df['IsOpen'].shift(-1).fillna(method='ffill')
    df['yOpen'] = df['IsOpen'].shift(+1).fillna(method='bfill')
    df['tPromotions'] = df['HasPromotions'].shift(-1).fillna(method='ffill')
    df['yPromotions'] = df['HasPromotions'].shift(+1).fillna(method='bfill')
    df = df.interpolate(method='time', downcast='infer', limit=100)
    testset.drop(testset.loc[testset['StoreID'] == val].index, inplace=True)
    df.index = df['StoreID']
    print(val, df.shape)
    testset = pd.concat([testset, df], ignore_index=True)
    print(val, testset.shape)
print(testset.shape)
testset['Precipitationmm'] = (np.ceil(testset.Precipitationmm / 10) * 1).astype(int)
testset['CloudCover'] = testset['CloudCover'].fillna(testset['Precipitationmm'])
testset['Max_Gust_SpeedKm_h'] = testset['Max_Gust_SpeedKm_h'].fillna(testset['Max_Wind_SpeedKm_h'])
testset['Min_VisibilitykM']=testset['Min_VisibilitykM'].fillna(testset['Min_VisibilitykM'].mean())
testset['Max_VisibilityKm']=testset['Max_VisibilityKm'].fillna(testset['Max_VisibilityKm'].mean())
testset['Mean_VisibilityKm']=testset['Mean_VisibilityKm'].fillna(testset['Mean_VisibilityKm'].mean())
#Convert some data to integer
col_to_int = ['Min_VisibilitykM','Max_VisibilityKm','Max_Gust_SpeedKm_h',
'CloudCover','Mean_VisibilityKm','HasPromotions','IsHoliday',
'Region','Region_AreaKM2','Region_GDP','Region_PopulationK']
for col in col_to_int:
    testset[col] = testset[col].astype(int)
#Add some data about time
testset['Month'] = pd.DatetimeIndex(testset['Date']).month
testset['Daysmonth']= pd.DatetimeIndex(testset['Date']).day
testset['Daysweek']= pd.DatetimeIndex(testset['Date']).dayofweek
testset['Quarter']= pd.DatetimeIndex(testset['Date']).quarter
testset['Year']= pd.DatetimeIndex(testset['Date']).year
testset.drop(columns='Date', inplace=True)
testset.drop(columns='IsOpen', inplace=True)
```
### Check the remaining missing data
```
train_tmp = (testset.isnull().sum() / len(testset)) * 100
train_tmp = train_tmp.drop(train_tmp[train_tmp == 0].index).sort_values(ascending=False)[:100]
missing_data = pd.DataFrame({'Missing Ratio' :train_tmp})
```
# PCA Analysis and Reduction
## Weather Features
In order to reduce the number of parameters bound to the weather features, and to concentrate more information in each remaining feature, we performed a Principal Component Analysis. The heatmap below shows the strong correlations between the weather features. Keeping only the first 4 components we reach a cumulative variance of ~98%, so we reduce roughly 20 features to 4 while losing only about 2% of the information. Before and after the PCA we also normalize the parameters, to reduce the sensitivity of the analysis to scale.
```
wheather_features = ['Max_Humidity', 'Max_Sea_Level_PressurehPa', 'Max_TemperatureC',
'Max_VisibilityKm', 'Max_Wind_SpeedKm_h', 'Mean_Dew_PointC',
'Mean_Humidity', 'Mean_Sea_Level_PressurehPa', 'Mean_TemperatureC','CloudCover',
'Mean_VisibilityKm', 'Mean_Wind_SpeedKm_h', 'Min_Dew_PointC', 'Max_Dew_PointC',
'Min_Humidity', 'Min_Sea_Level_PressurehPa', 'Min_TemperatureC',
'Min_VisibilitykM', 'Precipitationmm', 'WindDirDegrees','Max_Gust_SpeedKm_h']
full_pca_model = PCA()
n_dataset = dataset.shape[0]
n_testset = testset.shape[0]
superset = pd.concat([dataset,testset]).reset_index(drop=True)
superset[wheather_features] = preprocessing.normalize(superset[wheather_features])
full_fitted_model = full_pca_model.fit(superset[wheather_features])
corr = superset[wheather_features].corr()
plt.subplots(figsize=(12, 9))
sns.heatmap(corr, vmax=0.9, square=True)  # heatmap of the weather-feature correlations
plt.figure(figsize=(8, 8))
plt.semilogy(full_fitted_model.explained_variance_ratio_, '--o')
plt.xticks(np.arange(0,len(wheather_features),1))
plt.xlabel("Features")
plt.ylabel("Explained Variance Ratio")
plt.figure(figsize=(12, 12))
plt.semilogy(full_fitted_model.explained_variance_ratio_.cumsum(), '--o')
plt.xticks(np.arange(0,len(wheather_features),1))
plt.xlabel("Features")
plt.ylabel("Cumulative Explained Variance Ratio")
PCA_components=4
feature_selection_pca_model = PCA(n_components=PCA_components, svd_solver='full')
fitted_model = feature_selection_pca_model.fit(superset[wheather_features])
X_selected_features_pca = fitted_model.transform(superset[wheather_features])
toAdd = pd.DataFrame(X_selected_features_pca)
preprocessing.normalize(toAdd,axis=0)
for i in range(0, PCA_components):
    superset['wheather_PCA_' + str(i)] = toAdd[i]
superset.drop(columns=wheather_features, inplace=True)
```
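As an optional check of the ~98% cumulative variance quoted above (an addition, not in the original notebook), the fitted full PCA model can report the figure directly:
```
# Cumulative explained variance covered by the first PCA_components weather components.
cum_var = full_fitted_model.explained_variance_ratio_.cumsum()
print('Cumulative variance of the first {} components: {:.3f}'.format(PCA_components, cum_var[PCA_components - 1]))
```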
## Region
We performed the same transformation on the region features, reducing them to 2 components while losing less than 4% of the variance.
```
#reduce the number of region features
region_features = ['Region_AreaKM2','Region_GDP','Region_PopulationK']
superset[region_features] = preprocessing.normalize(superset[region_features])
full_fitted_model = full_pca_model.fit(superset[region_features])
corr = superset[region_features].corr()
plt.subplots(figsize=(12, 9))
sns.heatmap(corr, vmax=0.9, square=True)  # heatmap of the region-feature correlations
plt.figure(figsize=(8, 8))
plt.semilogy(full_fitted_model.explained_variance_ratio_, '--o')
plt.xticks(np.arange(0,len(region_features),1))
plt.xlabel("Features")
plt.ylabel("Explained Variance Ratio")
plt.figure(figsize=(12, 12))
plt.semilogy(full_fitted_model.explained_variance_ratio_.cumsum(), '--o')
plt.xticks(np.arange(0,len(region_features),1))
plt.xlabel("Features")
plt.ylabel("Cumulative Explained Variance Ratio")
PCA_components=2
feature_selection_pca_model = PCA(n_components=PCA_components, svd_solver='full')
fitted_model = feature_selection_pca_model.fit(superset[region_features])
X_selected_features_pca = fitted_model.transform(superset[region_features])
toAdd = pd.DataFrame(X_selected_features_pca)
preprocessing.normalize(toAdd,axis=0)
for i in range(0, PCA_components):
    superset['region_PCA_' + str(i)] = toAdd[i]
superset.drop(columns=region_features, inplace=True)
```
# One-Hot Encoding (OHE)
```
##EXCEPTION FOR DAYS AND MONTHS
for col in superset.columns:
    if (superset[col].dtypes == 'object'):
        for elem in superset[col].unique():
            elem = str(elem)
            superset[col + '_' + elem] = superset[col].apply(lambda x: 1 if str(x) == elem else 0).values.astype(float)
        superset.drop(columns=col, inplace=True)
dataset = superset[:n_dataset]
testset = superset[n_dataset:]
```
# Distribution of the Target
## Removing Skewness
After some analysis we noticed that several variables, as well as the target, were skewed. Fitting a Gaussian distribution makes the differences evident: as shown below, the distribution of the target is right-skewed.
```
plt.figure(figsize=(8, 8))
sns.distplot(dataset['NumberOfSales'] , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(dataset['NumberOfSales'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('NumberOfSales distribution')
#Get also the QQ-plot
fig = plt.figure()
plt.figure(figsize=(8, 8))
res = stats.probplot(dataset['NumberOfSales'], plot=plt)
plt.show()
sk_feat = search_log_to_skewed_features(dataset)
dataset = apply_log_to_skewed_features(dataset,sk_feat)
sk_feat = set(sk_feat)-set(['NumberOfSales', 'NumberOfCustomers'])
testset = apply_log_to_skewed_features(testset,sk_feat)
```
So, we decided to apply the boxcox1p (log-like) transformation to all the variables with a skewness greater than 0.75. The result obtained for the target is the following:
```
plt.figure(figsize=(8, 8))
sns.distplot(dataset['NumberOfSales'] , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(dataset['NumberOfSales'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('NumberOfSales distribution')
#Get also the QQ-plot
fig = plt.figure()
plt.figure(figsize=(8, 8))
res = stats.probplot(dataset['NumberOfSales'], plot=plt)
plt.show()
```
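Because the target was transformed, predictions must be mapped back to the original scale with `apply_exp_to_result` before computing scores. A minimal round-trip check (an addition, not in the original notebook), using the same `boxcox1p` / `inv_boxcox1p` pair from `scipy.special` with the lambda used above:
```
# Illustrative round-trip check: inv_boxcox1p undoes boxcox1p for lambda = 0.15.
import numpy as np
from scipy.special import boxcox1p, inv_boxcox1p

values = np.array([0.0, 10.0, 1000.0, 25000.0])
recovered = inv_boxcox1p(boxcox1p(values, 0.15), 0.15)
print(np.allclose(values, recovered))   # expected: True
```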
# Correlation Analysis
```
corr = dataset.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corr, vmax=0.9, square=True)
```
# Feature Selection
## Random Forest Selection
To select the best features found during preprocessing we tried several feature-selection methods: PCA-based selection, correlation-based selection and Random Forest feature selection. Since the best model found was an XGBoost, we kept the Random Forest (tree-based) selection. The importance threshold was set to a multiple of the median importance (1.5 * median in the code below), in order to take all the features before the step in the middle of the importance curve (~0.02). In this way we selected the first 21 features.
```
from sklearn.model_selection import KFold, cross_val_score, train_test_split
components = dataset.columns#[dataset.dtypes != 'object']
features=list(set(components) - set(wtarget))
#dataset[features] = dataset[features].values.astype(float)
cv = KFold(n_splits=2, shuffle=True, random_state=21)
X = np.array(dataset[features])
y = np.array(dataset[target])
selected_feat = dataset[features].columns
from sklearn.ensemble import ExtraTreesRegressor
forest = ExtraTreesRegressor(n_estimators=250, random_state=0, n_jobs=-1)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
    print("%d. feature %d %s (%f)" % (f + 1, indices[f], selected_feat[indices[f]], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.figure(figsize=(12, 12))
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), selected_feat[indices],rotation=90)
plt.xlim([-1, X.shape[1]])
plt.show()
from sklearn.feature_selection import SelectFromModel
feature_selection_model = SelectFromModel(forest, prefit=True,threshold='1.5*median')
X_selected_features_forest = feature_selection_model.transform(X)
X_selected_features_forest.shape
X_test = np.array(testset[features])
X_test_selected_features_forest = feature_selection_model.transform(X_test)
np.save('X.npy',X)
np.save('y.npy',y)
np.save('X_selected.npy',X_selected_features_forest)
np.save('X_test.npy',X_test)
np.save('X_test_selected.npy',X_test_selected_features_forest)
```
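To see which columns actually survived the selection (an optional addition, relying on scikit-learn's `SelectFromModel.get_support`), the boolean mask can be mapped back onto the feature names:
```
# Illustrative: list the names of the features kept by SelectFromModel.
mask = feature_selection_model.get_support()
selected_names = [name for name, keep in zip(features, mask) if keep]
print(len(selected_names), 'features selected:')
print(selected_names)
```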
# Model Selection and Evaluation
We trained several different models in order to get a more reliable evaluation of which model to use. As a baseline, we started with a simple model, a KNN regressor.
```
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from scipy.special import boxcox1p, inv_boxcox1p
from tqdm import tqdm
import xgboost as xgb
import lightgbm as lgb
import pickle
import numpy as np
```
### Lasso
```
lasso_params = { 'alpha':5e-02 }
lasso = Lasso(max_iter=10000, **lasso_params)
```
### LightGBM Parameters
```
lgb_params = {'n_jobs': -1,
'min_child_w': 1,
'colsample': 0.5,
'bagging_seed': 10,
'learning_rate': 0.7,
'bagging_fraction': 1,
'min_data_in_leaf': 8,
'objective': 'regression',
'num_leaves': 400,
'estimators': 100,
'bagging_freq': 1,
'reg_lambda': 0.9,
'reg_alpha': 0.9,
'max_bin': 300,
'min_sum_hessian_in_leaf': 11}
model_lgb = lgb.LGBMRegressor(**lgb_params)
```
### XGBoost Parameters
```
xgb_params ={
"n_estimators":100,
"colsample":0.5,
"gamma":0.05,
"learning":0.1,
"max_dep":30,
"min_child_w":1,
"reg_alpha":0.9,
"reg_lambda":0.8,
"n_jobs":-1 }
xgb_params2 ={
"n_estimators":50,
"colsample":0.5,
"gamma":0.05,
"learning":0.1,
"max_dep":30,
"min_child_w":1,
"reg_alpha":0.9,
"reg_lambda":0.8,
"n_jobs":-1 }
model_xgb = xgb.XGBRegressor(**xgb_params)
model_xgb2 = xgb.XGBRegressor(**xgb_params2)
```
### Random Forest Parameters
```
forest_params = {'min_impurity_decrease': False, 'max_features': 'auto', 'oob_score': False, 'bootstrap': True,
'warm_start': False, 'n_jobs': -1, 'criterion': 'mse', 'min_weight_fraction_leaf': 1e-07,
'min_samples_split': 5, 'min_samples_leaf': 1, 'max_leaf_nodes': None, 'n_estimators': 50,
'max_depth': 50}
model_forest = RandomForestRegressor(**forest_params)
```
### Lasso Score
```
# y_test is assumed to hold the true target values for the evaluation split.
lasso.fit(X, y)
lasso_preds = lasso.predict(X_test)
print("SCORE:", r2_score(y_test, apply_exp_to_result(lasso_preds)))
```
### KNN Score
The first model we trained, to have a baseline to beat, was the KNN. We trained it with different numbers of neighbours; the best result obtained was an R2 score of about 0.68, using 10-fold cross-validation.
```
result=[]
kfolds = KFold(10,shuffle=True,random_state=1234)
for i in range(2, 30, 1):
    neigh = KNeighborsRegressor(n_neighbors=i)
    scores = cross_val_score(neigh, X_selected_features_forest, y, cv=kfolds)
    print('KNN has obtained', scores.mean(), 'with number of neighbours =', i)
    result.append((i, scores.mean()))
plt.figure(figsize=(12,12))
results = pd.DataFrame(result)
plt.plot(results[0], results[1] ,linestyle='-', marker=".", color='green', markersize=3, label="R2")
```
### LightGBM Score
```
model_lgb.fit(X, y)
lgb_preds = model_lgb.predict(X_test)
print("SCORE:", r2_score(y_test, apply_exp_to_result(lgb_preds)))
```
### Random Forest Score
```
model_forest.fit(X,y)
forest_preds = model_forest.predict(X_test)
print("SCORE:", r2_score(y_test, apply_exp_to_result(forest_preds)))
```
### XGB Score
```
model_xgb.fit(X,y)
xgb_preds = model_xgb.predict(X_test)
print("SCORE:", r2_score(y_test, apply_exp_to_result(xgb_preds)))
```
### Model Averaging
```
mean_results = (lgb_preds + forest_preds + xgb_preds) / 3
print("SCORE:", r2_score(y_test, apply_exp_to_result(mean_results)))
print("SCORE:", mean_absolute_error(y_test, apply_exp_to_result(mean_results)))
```
# Model Ensembling
Finally, we tried meta-modeling, since averaging the base models already improves the results. In this approach we create a meta-model on top of the averaged base models and use out-of-fold predictions of those models to train the meta-model. The best base models were Random Forest, LightGBM and XGBoost, and the final model is an ensemble of these single models.
The single models performed very well on a training set created with random sampling, but in a more realistic setting, where we predicted two entire months, they were outperformed by the ensembles.
```
class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin):
    def __init__(self, base_models, meta_model, n_folds=10):
        self.base_models = base_models
        self.meta_model = meta_model
        self.n_folds = n_folds
    # We again fit the data on clones of the original models
    def fit(self, X, y):
        self.base_models_ = [list() for x in self.base_models]
        self.meta_model_ = clone(self.meta_model)
        kfold = KFold(n_splits=self.n_folds, shuffle=True)
        # Train cloned base models, then create the out-of-fold predictions
        # that are needed to train the cloned meta-model
        out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))
        for i, model in enumerate(self.base_models):
            for train_index, holdout_index in kfold.split(X, y):
                instance = clone(model)
                self.base_models_[i].append(instance)
                instance.fit(X[train_index], y[train_index])
                y_pred = instance.predict(X[holdout_index])
                out_of_fold_predictions[holdout_index, i] = y_pred
        # Now train the cloned meta-model using the out-of-fold predictions as new features
        self.meta_model_.fit(out_of_fold_predictions, y)
        return self
    # Do the predictions of all base models on the test data and use the averaged predictions as
    # meta-features for the final prediction, which is done by the meta-model
    def predict(self, X):
        meta_features = np.column_stack([
            np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)
            for base_models in self.base_models_])
        return self.meta_model_.predict(meta_features)
stacked_averaged_models = StackingAveragedModels(base_models = (model_xgb, model_lgb, model_forest),
meta_model = model_xgb2)
stacked_averaged_models.fit(X,y)
averaged_models_preds = stacked_averaged_models.predict(X_test)
averaged_models_preds = apply_exp_to_result(averaged_models_preds)
print("R2 Score:", r2_score(y_train, averaged_models_preds))
print("MAE Score:", mean_absolute_error(y_train, averaged_models_preds))
```
|
github_jupyter
|
import pandas as pd
import numpy as np
np.set_printoptions(threshold='nan')
from sklearn import linear_model
from sklearn import model_selection
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from sklearn import svm
from sklearn.metrics import r2_score
from sklearn import linear_model
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import StratifiedKFold
from sklearn.decomposition import PCA
from sklearn.feature_selection import f_classif
from datetime import datetime
from scipy.special import boxcox1p
from scipy import stats
from scipy.stats import norm,skew
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', None)
%matplotlib inline
pd.set_option('display.max_rows', 500)
def search_log_to_skewed_features(df):
numeric_feats = []
for col in df.columns:
if(len(df[col].unique())>2 and df[col].dtype != "object"):
numeric_feats.append(col)
# Check the skew of all numerical features
skewed_feats = df[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
skewness = pd.DataFrame({'Skew': skewed_feats})
skewness = skewness[abs(skewness.Skew) > 0.75]
skewed_features = skewness.index
return skewed_features
def apply_log_to_skewed_features(df,skewed_features,lambda_log = 0.15):
for feat in skewed_features:
df[feat] = boxcox1p(df[feat], lambda_log)
print("logged features:",skewed_features)
return df
def apply_exp_to_result(df,lambda_log = 0.15):
print(df[target].mean())
df[feat] = np.inv_boxcox1p(df[target], lambda_log)
print(df[target].mean())
return df
def add_date(df):
date = np.array( [ x.split('/') for x in df['Date'].values])
date = date.astype(np.int32)
df['Date'] = [ datetime(x[2],x[1],x[0]) for x in date ]
def apply_exp_to_result(df,lambda_log = 0.15):
return inv_boxcox1p(df, lambda_log)
dataset = pd.read_csv('../data/original_train.csv')
testset = pd.read_csv('../data/original_test.csv')
dataset=dataset[dataset.IsOpen != 0]
testset=testset[testset.IsOpen != 0]
components = dataset.columns[dataset.columns!='Unnamed: 0']
tcomponents = testset.columns[testset.columns!='Unnamed: 0']
features=set(components).intersection(tcomponents)
wtarget=list(set(components)-set(tcomponents))
target = 'NumberOfSales'
add_date(dataset)
for val in dataset['StoreID'].unique():
df = pd.DataFrame(dataset.loc[dataset['StoreID'] == val])
df.index = df['Date']
df['tOpen']=df['IsOpen'].shift(-1).fillna(method='ffill')
df['yOpen']=df['IsOpen'].shift(+1).fillna(method='bfill')
df['tPromotions']=df['HasPromotions'].shift(-1).fillna(method='ffill')
df['yPromotions']=df['HasPromotions'].shift(+1).fillna(method='bfill')
df = df.interpolate(method='time',downcast='infer',limit=10)
dataset.drop(dataset.loc[dataset['StoreID'] == val].index, inplace=True)
df.index = df['StoreID']
dataset = pd.concat([dataset, df],ignore_index=True)
dataset['Precipitationmm'] = (np.ceil(dataset.Precipitationmm / 10) * 1).astype(int)
dataset['CloudCover'] = dataset['CloudCover'].fillna(dataset['Precipitationmm'])
dataset['Max_Gust_SpeedKm_h'] = dataset['Max_Gust_SpeedKm_h'].fillna(dataset['Max_Wind_SpeedKm_h'])
#Convert some data to integer
col_to_int = ['Min_VisibilitykM','Max_VisibilityKm','Max_Gust_SpeedKm_h',
'CloudCover','Mean_VisibilityKm','HasPromotions','IsHoliday','HasPromotions']
for col in col_to_int:
dataset[col] = dataset[col].astype(int)
#Convert some data to int since they are One Hot Encoded
#Add some datas about time
dataset['Month'] = pd.DatetimeIndex(dataset['Date']).month
dataset['Daysmonth']= pd.DatetimeIndex(dataset['Date']).day
dataset['Daysweek']= pd.DatetimeIndex(dataset['Date']).dayofweek
dataset['Quarter']= pd.DatetimeIndex(dataset['Date']).quarter
dataset['Year']= pd.DatetimeIndex(dataset['Date']).year
dataset.drop(columns='Date', inplace=True)
dataset.drop(columns='IsOpen', inplace=True)
add_date(testset)
for val in testset['StoreID'].unique():
print(val,testset.shape)
df = pd.DataFrame(testset.loc[testset['StoreID'] == val])
df.index = df['Date']
df['tOpen']=df['IsOpen'].shift(-1).fillna(method='ffill')
df['yOpen']=df['IsOpen'].shift(+1).fillna(method='bfill')
df['tPromotions']=df['HasPromotions'].shift(-1).fillna(method='ffill')
df['yPromotions']=df['HasPromotions'].shift(+1).fillna(method='bfill')
df = df.interpolate(method='time',downcast='infer', limit=100)
testset.drop(testset.loc[testset['StoreID'] == val].index, inplace=True)
df.index = df['StoreID']
print(val,df.shape)
testset = pd.concat([testset, df],ignore_index=True)
print(val,testset.shape)
print(testset.shape)
testset['Precipitationmm'] = (np.ceil(testset.Precipitationmm / 10) * 1).astype(int)
testset['CloudCover'] = testset['CloudCover'].fillna(testset['Precipitationmm'])
testset['Max_Gust_SpeedKm_h'] = testset['Max_Gust_SpeedKm_h'].fillna(testset['Max_Wind_SpeedKm_h'])
testset['Min_VisibilitykM']=testset['Min_VisibilitykM'].fillna(testset['Min_VisibilitykM'].mean())
testset['Max_VisibilityKm']=testset['Max_VisibilityKm'].fillna(testset['Max_VisibilityKm'].mean())
testset['Mean_VisibilityKm']=testset['Mean_VisibilityKm'].fillna(testset['Mean_VisibilityKm'].mean())
#Convert some data to integer
col_to_int = ['Min_VisibilitykM','Max_VisibilityKm','Max_Gust_SpeedKm_h',
'CloudCover','Mean_VisibilityKm','HasPromotions','IsHoliday',
'Region','Region_AreaKM2','Region_GDP','Region_PopulationK']
for col in col_to_int:
testset[col] = testset[col].astype(int)
#Add some datas about time
testset['Month'] = pd.DatetimeIndex(testset['Date']).month
testset['Daysmonth']= pd.DatetimeIndex(testset['Date']).day
testset['Daysweek']= pd.DatetimeIndex(testset['Date']).dayofweek
testset['Quarter']= pd.DatetimeIndex(testset['Date']).quarter
testset['Year']= pd.DatetimeIndex(testset['Date']).year
testset.drop(columns='Date', inplace=True)
testset.drop(columns='IsOpen', inplace=True)
train_tmp = (testset.isnull().sum() / len(testset)) * 100
train_tmp = train_tmp.drop(train_tmp[train_tmp == 0].index).sort_values(ascending=False)[:100]
missing_data = pd.DataFrame({'Missing Ratio' :train_tmp})
wheather_features = ['Max_Humidity', 'Max_Sea_Level_PressurehPa', 'Max_TemperatureC',
'Max_VisibilityKm', 'Max_Wind_SpeedKm_h', 'Mean_Dew_PointC',
'Mean_Humidity', 'Mean_Sea_Level_PressurehPa', 'Mean_TemperatureC','CloudCover',
'Mean_VisibilityKm', 'Mean_Wind_SpeedKm_h', 'Min_Dew_PointC', 'Max_Dew_PointC',
'Min_Humidity', 'Min_Sea_Level_PressurehPa', 'Min_TemperatureC',
'Min_VisibilitykM', 'Precipitationmm', 'WindDirDegrees','Max_Gust_SpeedKm_h']
full_pca_model = PCA()
n_dataset = dataset.shape[0]
n_testset = testset.shape[0]
superset = pd.concat([dataset,testset]).reset_index(drop=True)
superset[wheather_features] = preprocessing.normalize(superset[wheather_features])
full_fitted_model = full_pca_model.fit(superset[wheather_features])
corr = superset[weather_features].corr()
plt.subplots(figsize=(12,9))
plt.figure(figsize=(8, 8))
plt.semilogy(full_fitted_model.explained_variance_ratio_, '--o')
plt.xticks(np.arange(0,len(wheather_features),1))
plt.xlabel("Features")
plt.ylabel("Explained Variance Ratio")
plt.figure(figsize=(12, 12))
plt.semilogy(full_fitted_model.explained_variance_ratio_.cumsum(), '--o')
plt.xticks(np.arange(0,len(wheather_features),1))
plt.xlabel("Features")
plt.ylabel("Cumulative Explained Variance Ratio")
PCA_components=4
feature_selection_pca_model = PCA(n_components=PCA_components, svd_solver='full')
fitted_model = feature_selection_pca_model.fit(superset[wheather_features])
X_selected_features_pca = fitted_model.transform(superset[wheather_features])
toAdd = pd.DataFrame(X_selected_features_pca)
preprocessing.normalize(toAdd,axis=0)
for i in range(0,PCA_components):
superset['wheather_PCA_'+str(i)]= toAdd[i]
superset.drop(columns=wheather_features, inplace=True)
#reduce the number of region features
region_features = ['Region_AreaKM2','Region_GDP','Region_PopulationK']
superset[region_features] = preprocessing.normalize(superset[region_features])
full_fitted_model = full_pca_model.fit(superset[region_features])
corr = superset[region_features].corr()
plt.subplots(figsize=(12,9))
plt.figure(figsize=(8, 8))
plt.semilogy(full_fitted_model.explained_variance_ratio_, '--o')
plt.xticks(np.arange(0,len(region_features),1))
plt.xlabel("Features")
plt.ylabel("Explained Variance Ratio")
plt.figure(figsize=(12, 12))
plt.semilogy(full_fitted_model.explained_variance_ratio_.cumsum(), '--o')
plt.xticks(np.arange(0,len(region_features),1))
plt.xlabel("Features")
plt.ylabel("Cumulative Explained Variance Ratio")
PCA_components=2
feature_selection_pca_model = PCA(n_components=PCA_components, svd_solver='full')
fitted_model = feature_selection_pca_model.fit(superset[region_features])
X_selected_features_pca = fitted_model.transform(superset[region_features])
toAdd = pd.DataFrame(X_selected_features_pca)
preprocessing.normalize(toAdd,axis=0)
for i in range(0,PCA_components):
superset['region_PCA_'+str(i)]= toAdd[i]
superset.drop(columns=region_features, inplace=True)
##EXCEPTION FOR DAYS AND MONTHS
for col in superset.columns:
if (superset[col].dtypes == 'object'):
for elem in superset[col].unique():
elem = str(elem)
superset[col+'_'+elem] = superset[col].apply(lambda x: 1 if str(x)==elem else 0).values.astype(float)
superset.drop(columns=col,inplace=True)
dataset = superset[:n_dataset]
testset = superset[n_dataset:]
plt.figure(figsize=(8, 8))
sns.distplot(dataset['NumberOfSales'] , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(dataset['NumberOfSales'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('NumberOfSales distribution')
#Get also the QQ-plot
fig = plt.figure()
plt.figure(figsize=(8, 8))
res = stats.probplot(dataset['NumberOfSales'], plot=plt)
plt.show()
sk_feat = search_log_to_skewed_features(dataset)
dataset = apply_log_to_skewed_features(dataset,sk_feat)
sk_feat = set(sk_feat)-set(['NumberOfSales', 'NumberOfCustomers'])
testset = apply_log_to_skewed_features(testset,sk_feat)
plt.figure(figsize=(8, 8))
sns.distplot(dataset['NumberOfSales'] , fit=norm);
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(dataset['NumberOfSales'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
#Now plot the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('NumberOfSales distribution')
#Get also the QQ-plot
fig = plt.figure()
plt.figure(figsize=(8, 8))
res = stats.probplot(dataset['NumberOfSales'], plot=plt)
plt.show()
corr = dataset.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corr, vmax=0.9, square=True)
from sklearn.model_selection import KFold, cross_val_score, train_test_split
components = dataset.columns#[dataset.dtypes != 'object']
features=list(set(components) - set(wtarget))
#dataset[features] = dataset[features].values.astype(float)
cv = KFold(n_splits=2, random_state=21)
X = np.array(dataset[features])
y = np.array(dataset[target])
selected_feat = dataset[features].columns
from sklearn.ensemble import ExtraTreesRegressor
forest = ExtraTreesRegressor(n_estimators=250, random_state=0, n_jobs=-1)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d %s (%f)" % (f + 1, indices[f], selected_feat[indices[f]], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.figure(figsize=(12, 12))
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), selected_feat[indices],rotation=90)
plt.xlim([-1, X.shape[1]])
plt.show()
from sklearn.feature_selection import SelectFromModel
feature_selection_model = SelectFromModel(forest, prefit=True,threshold='1.5*median')
X_selected_features_forest = feature_selection_model.transform(X)
X_selected_features_forest.shape
X_test = np.array(testset[features])
X_test_selected_features_forest = feature_selection_model.transform(X_test)
np.save('X.npy',X)
np.save('y.npy',y)
np.save('X_selected.npy',X_selected_features_forest)
np.save('X_test.npy',X_test)
np.save('X_test_selected.npy',X_test_selected_features_forest)
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from scipy.special import boxcox1p, inv_boxcox1p
from tqdm import tqdm
import xgboost as xgb
import lightgbm as lgb
import pickle
import numpy as np
lasso_params = { 'alpha':5e-02 }
lasso = Lasso(max_iter=10000, **lasso_params)
lgb_params = {'n_jobs': -1,
'min_child_w': 1,
'colsample': 0.5,
'bagging_seed': 10,
'learning_rate': 0.7,
'bagging_fraction': 1,
'min_data_in_leaf': 8,
'objective': 'regression',
'num_leaves': 400,
'estimators': 100,
'bagging_freq': 1,
'reg_lambda': 0.9,
'reg_alpha': 0.9,
'max_bin': 300,
'min_sum_hessian_in_leaf': 11}
model_lgb = lgb.LGBMRegressor(**lgb_params)
xgb_params ={
"n_estimators":100,
"colsample":0.5,
"gamma":0.05,
"learning":0.1,
"max_dep":30,
"min_child_w":1,
"reg_alpha":0.9,
"reg_lambda":0.8,
"n_jobs":-1 }
xgb_params2 ={
"n_estimators":50,
"colsample":0.5,
"gamma":0.05,
"learning":0.1,
"max_dep":30,
"min_child_w":1,
"reg_alpha":0.9,
"reg_lambda":0.8,
"n_jobs":-1 }
model_xgb = xgb.XGBRegressor(**xgb_params)
model_xgb2 = xgb.XGBRegressor(**xgb_params2)
forest_params = {'min_impurity_decrease': False, 'max_features': 'auto', 'oob_score': False, 'bootstrap': True,
'warm_start': False, 'n_jobs': -1, 'criterion': 'mse', 'min_weight_fraction_leaf': 1e-07,
'min_samples_split': 5, 'min_samples_leaf': 1, 'max_leaf_nodes': None, 'n_estimators': 50,
'max_depth': 50}
model_forest = RandomForestRegressor(**forest_params)
lasso.fit(X, y)
lasso_preds = lasso.predict(X_test)
print("SCORE:", r2_score(y_test, apply_exp_to_result(lasso_preds)))
from sklearn.neighbors import KNeighborsRegressor

result = []
kfolds = KFold(10,shuffle=True,random_state=1234)
for i in range(2,30,1):
neigh = KNeighborsRegressor(n_neighbors=i)
scores = cross_val_score(neigh, X_selected_features_forest, y, cv=kfolds)
    print('KNN has obtained', scores.mean(), 'with number of neighbors =', i)
result.append((i,scores.mean()))
plt.figure(figsize=(12,12))
results = pd.DataFrame(result)
plt.plot(results[0], results[1] ,linestyle='-', marker=".", color='green', markersize=3, label="R2")
model_lgb.fit(X,y)
lgb_preds = model_lgb.predict(X_test)
print("SCORE:", r2_score(y_test, apply_exp_to_result(lgb_preds)))
model_forest.fit(X,y)
forest_preds = model_forest.predict(X_test)
print("SCORE:", r2_score(y_test, apply_exp_to_result(forest_preds)))
model_xgb.fit(X,y)
xgb_preds = model_xgb.predict(X_test)
print("SCORE:", r2_score(y_test, apply_exp_to_result(xgb_preds)))
mean_results = (lgb_preds+forest_preds+xgb_preds)/3
print("SCORE:", r2_score(y_test, apply_exp_to_result(mean_results))
print("SCORE:", mean_absolute_error(y_test, apply_exp_to_result(mean_results))
class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin):
def __init__(self, base_models, meta_model, n_folds=10):
self.base_models = base_models
self.meta_model = meta_model
self.n_folds = n_folds
# We again fit the data on clones of the original models
def fit(self, X, y):
self.base_models_ = [list() for x in self.base_models]
self.meta_model_ = clone(self.meta_model)
kfold = KFold(n_splits=self.n_folds, shuffle=True)
# Train cloned base models then create out-of-fold predictions
# that are needed to train the cloned meta-model
out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))
for i, model in enumerate(self.base_models):
for train_index, holdout_index in kfold.split(X, y):
instance = clone(model)
self.base_models_[i].append(instance)
instance.fit(X[train_index], y[train_index])
y_pred = instance.predict(X[holdout_index])
out_of_fold_predictions[holdout_index, i] = y_pred
# Now train the cloned meta-model using the out-of-fold predictions as new feature
self.meta_model_.fit(out_of_fold_predictions, y)
return self
#Do the predictions of all base models on the test data and use the averaged predictions as
#meta-features for the final prediction which is done by the meta-model
def predict(self, X):
meta_features = np.column_stack([
np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)
for base_models in self.base_models_ ])
return self.meta_model_.predict(meta_features)
stacked_averaged_models = StackingAveragedModels(base_models = (model_xgb, model_lgb, model_forest),
meta_model = model_xgb2)
stacked_averaged_models.fit(X,y)
averaged_models_preds = stacked_averaged_models.predict(X_test)
averaged_models_preds = apply_exp_to_result(averaged_models_preds)
print("R2 Score:", r2_score(y_train, averaged_models_preds))
print("MAE Score:", mean_absolute_error(y_train, averaged_models_preds))
<a href="https://colab.research.google.com/github/XavierCarrera/Tutorial-Machine-Learning-Arboles/blob/main/1_%C3%81rboles_de_Regresion.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Introduction
In this notebook we will implement a regression tree and compare it with a simple linear regression.
For this exercise we will use the [Fish Market](https://www.kaggle.com/aungpyaeap/fish-market?select=Fish.csv) data set, which contains information about the fish on sale at a market. The features are the following:
* *Species*: the species the fish belongs to
* *Weight*: weight in grams
* *Length1*: vertical length in cm
* *Length2*: diagonal length in cm
* *Length3*: cross length in cm
* *Height*: height in cm
* *Width*: width
For this case, we will use weight as the feature we want to predict, treating it as our dependent variable (even though this is not necessarily true).
The libraries we use are:
* **Numpy**, which provides ready-made functions for handling vectors and matrices.
* **Pandas**, which lets us work with matrices as tables.
* **Seaborn** and **Matplotlib** for data visualization.
* **Scikit-Learn**, with which we will build small machine learning models using linear regression and a decision tree.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression
%matplotlib inline
plt.rcParams['figure.figsize'] = (16, 9)
plt.style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
```
We start by loading the data set with `read_csv` in order to take a look at our data frame.
```
df = pd.read_csv("https://raw.githubusercontent.com/XavierCarrera/Tutorial-Machine-Learning-Arboles/main/Fish.csv")
df
```
With the *info* method we can see the data types we are working with.
```
df.info()
```
With the *describe* method we can see the basic summary statistics.
```
df.describe()
```
# Exploratory Data Analysis
At this point it is worth making sense of our data and seeing what we have at hand. One of the quickest analyses we can run is computing correlation coefficients. We can do this with Pandas' *corr* method and then draw a Seaborn *heatmap*.
Important note: the *corr* method without parameters computes Pearson correlations, which focus on linear dependence. If we want other kinds of correlation, we can pass them through the *method* argument: Kendall (ordinal association) or Spearman (rank correlation between random variables).
```
corr = df.corr()
sns.heatmap(corr, annot = True, yticklabels=corr.columns, xticklabels=corr.columns)
```
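The note above mentions Kendall and Spearman as alternatives to Pearson. As a quick illustration (this cell is ours and not part of the original notebook), they can be requested through the *method* argument:
```
# Rank-based correlations with Weight, as mentioned in the note above
# (on recent pandas versions you may also need to pass numeric_only=True)
print(df.corr(method='spearman')["Weight"].sort_values(ascending=False))
print(df.corr(method='kendall')["Weight"].sort_values(ascending=False))
```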
One way to visualize these relationships is with a Seaborn *pairplot*.
```
sns.pairplot(df, hue="Species")
```
We have arbitrarily chosen weight as the variable to predict. We can see that it is fairly correlated with other variables in our data set, and there is more than one way to predict the weight of a fish.
To keep the exercise simple, we will use only the vertical length (Length1), which has a correlation coefficient of 0.92 with weight. Still, we should keep in mind that several factors may be needed to fully understand the phenomenon.
```
sns.regplot(x=df["Length1"], y=df["Weight"])
```
# Models: Linear Regression vs. Decision Tree
Now we will train our models using Scikit-Learn's *LinearRegression* and *DecisionTreeRegressor* modules. As a good practice, we split our data into training and test sets.
Since we are using a single predictor variable, we need to turn it into a column matrix with the *reshape* method.
```
X = df["Length1"].values.reshape(-1,1)
y = df["Weight"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 42)
reg = LinearRegression()
reg.fit(X_train, y_train)
tree = DecisionTreeRegressor()
tree.fit(X_train, y_train)
```
With the models trained, we can make predictions on the test set. This tells us how far off our predictions are.
```
y_reg_predict = reg.predict(X_test)
y_tree_predict = tree.predict(X_test)
mse = mean_squared_error(y_test, y_reg_predict)
ase = mean_absolute_error(y_test, y_reg_predict)
print(mse)
print(ase)
mse = mean_squared_error(y_test, y_tree_predict)
ase = mean_absolute_error(y_test, y_tree_predict)
print(mse)
print(ase)
```
Another way to visualize these errors is by plotting the residuals (the difference between the actual and the predicted value).
```
plt.style.use('fivethirtyeight')
plt.scatter(reg.predict(X_train), reg.predict(X_train) - y_train,
color = "red", s = 10, label = 'Datos de Entrenamiento')
plt.scatter(reg.predict(X_test), reg.predict(X_test) - y_test,
color = "blue", s = 10, label = 'Datos de Testeo')
plt.hlines(y = 0, xmin = 0, xmax = 7, linewidth = 2)
plt.legend(loc = 'upper right')
plt.title("Errores Residuales de Regresión Linear")
plt.show()
plt.style.use('fivethirtyeight')
plt.scatter(tree.predict(X_train), tree.predict(X_train) - y_train,
            color = "red", s = 10, label = 'Datos de Entrenamiento')
plt.scatter(tree.predict(X_test), tree.predict(X_test) - y_test,
            color = "blue", s = 10, label = 'Datos de Testeo')
plt.hlines(y = 0, xmin = 0, xmax = 7, linewidth = 2)
plt.legend(loc = 'upper right')
plt.title("Errores Residuales del Árbol de Regresión")
plt.show()
```
In this case we used the mean squared error and the mean absolute error to decide which approach works best for our problem. The absolute error does not tell us much; the squared error, however, is farther from 0 for the regression tree. Consequently, a simple linear regression is the more effective choice here.
But why? As we have said, a single decision tree is a rather weak predictor on its own. We therefore need a different approach, which we will see in the following notebooks.
```
%matplotlib inline
%reload_ext autoreload
%autoreload 2
# Display every expression's output in a cell, not just the last one
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
```
- Hooks
    - PyTorch hooks come in two kinds, forward hooks and backward hooks; a hook function must take three arguments: module, input, output (the current module, its input and its output).
```
import torch
from torch import nn, optim
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
data = torch.randn(10, 26)
model = nn.Sequential(
nn.Linear(26, 10),
nn.Linear(10, 5),
nn.ReLU(inplace=True),
nn.Linear(5, 3)
)
model(data)
act_means = [[] for _ in model]
act_stds = [[] for _ in model]
def append_stats(i, mod, inp, outp):
if mod.training:
act_means[i].append(outp.data.mean())
act_stds [i].append(outp.data.std())
for i, m in enumerate(model):
m.register_forward_hook(partial(append_stats, i))
model(data)
act_means
act_stds
```
- Hooks should be removed when they are no longer needed, otherwise they keep accumulating and memory usage blows up.
```
def children(m): return list(m.children())
class Hook():
def __init__(self, m, f): self.hook = m.register_forward_hook(partial(f, self))
def remove(self): self.hook.remove()
def __del__(self): self.remove()
def append_stats(hook, mod, inp, outp):
if not hasattr(hook,'stats'): hook.stats = ([],[])
means,stds = hook.stats
if mod.training:
means.append(outp.data.mean())
stds .append(outp.data.std())
hooks = [Hook(l, append_stats) for l in children(model)]
model(data)
[h.stats for h in hooks]
```
**Once we give our Hooks class `__enter__` and `__exit__` methods, we can use it as a context manager. This ensures that all hooks are removed as soon as we leave the `with` block.**
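A minimal sketch of that idea (the `Hooks` container below is illustrative, not taken from this notebook; it simply wraps the `Hook` class defined above):
```
class Hooks():
    "Container of Hook objects that cleans up after itself."
    def __init__(self, ms, f): self.hooks = [Hook(m, f) for m in ms]
    def __getitem__(self, i): return self.hooks[i]
    def __len__(self): return len(self.hooks)
    def __iter__(self): return iter(self.hooks)
    def __enter__(self, *args): return self
    def __exit__(self, *args): self.remove()
    def remove(self):
        for h in self.hooks: h.remove()

# Every hook is detached automatically when the with-block ends
with Hooks(children(model), append_stats) as hooks:
    model(data)
    print([h.stats for h in hooks])
```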
```
torch.log1p??
```
## dropout
**Dropout is essentially a masking operation.**
```
from typing import Collection
from torch import Tensor

def dropout_mask(x:Tensor, sz:Collection[int], p:float):
"Return a dropout mask of the same type as `x`, size `sz`, with probability `p` to cancel an element."
return x.new(*sz).bernoulli_(1-p).div_(1-p)
class RNNDropout(nn.Module):
"Dropout with probability `p` that is consistent on the seq_len dimension."
def __init__(self, p:float=0.5):
super().__init__()
self.p=p
def forward(self, x:Tensor)->Tensor:
if not self.training or self.p == 0.: return x
m = dropout_mask(x.data, (x.size(0), 1, x.size(2)), self.p)
return x * m
a = torch.randn(3, 4, 3)
b = a.clone() # must be a clone here: plain assignment would only copy the reference
a.bernoulli_(0.8).div_(0.8) * b
m = RNNDropout(0.2)
m(b)
```
- Linear interpolation
$$\text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)$$
```
torch.lerp??
```
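A quick sanity check of the formula above (a small example of ours, not from the original notebook):
```
start = torch.zeros(3)
end = torch.tensor([10., 20., 30.])
weight = 0.1

# torch.lerp(start, end, weight) == start + weight * (end - start)
print(torch.lerp(start, end, weight))     # tensor([1., 2., 3.])
print(start + weight * (end - start))     # same values
```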
- A BatchNorm implementation
```
class BatchNorm(nn.Module):
def __init__(self, nf, mom=0.1, eps=1e-5):
super().__init__()
# NB: pytorch bn mom is opposite of what you'd expect
self.mom,self.eps = mom,eps
self.mults = nn.Parameter(torch.ones (nf,1,1))
self.adds = nn.Parameter(torch.zeros(nf,1,1))
self.register_buffer('vars', torch.ones(1,nf,1,1))
self.register_buffer('means', torch.zeros(1,nf,1,1))
def update_stats(self, x):
m = x.mean((0,2,3), keepdim=True)
v = x.var ((0,2,3), keepdim=True)
self.means.lerp_(m, self.mom)
self.vars.lerp_ (v, self.mom)
return m,v
def forward(self, x):
if self.training:
with torch.no_grad(): m,v = self.update_stats(x)
else: m,v = self.means,self.vars
x = (x-m) / (v+self.eps).sqrt()
return x*self.mults + self.adds
```
$$y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta$$
```
from torch import tensor

class LayerNorm(nn.Module):
__constants__ = ['eps']
def __init__(self, eps=1e-5):
super().__init__()
self.eps = eps
self.mult = nn.Parameter(tensor(1.))
self.add = nn.Parameter(tensor(0.))
def forward(self, x):
m = x.mean((1,2,3), keepdim=True)
v = x.var ((1,2,3), keepdim=True)
x = (x-m) / ((v+self.eps).sqrt())
return x*self.mult + self.add
class InstanceNorm(nn.Module):
__constants__ = ['eps']
def __init__(self, nf, eps=1e-0):
super().__init__()
self.eps = eps
self.mults = nn.Parameter(torch.ones (nf,1,1))
self.adds = nn.Parameter(torch.zeros(nf,1,1))
def forward(self, x):
m = x.mean((2,3), keepdim=True)
v = x.var ((2,3), keepdim=True)
res = (x-m) / ((v+self.eps).sqrt())
return res*self.mults + self.adds
```
- BatchNorm is affected by the batch size: when the batch size is very small, the batch statistics are estimated from almost no samples and become unreliable.
- This is addressed with a running ("smoothed") BatchNorm, implemented below.
```
import math
from torch import tensor

class RunningBatchNorm(nn.Module):
def __init__(self, nf, mom=0.1, eps=1e-5):
super().__init__()
self.mom,self.eps = mom,eps
self.mults = nn.Parameter(torch.ones (nf,1,1))
self.adds = nn.Parameter(torch.zeros(nf,1,1))
self.register_buffer('sums', torch.zeros(1,nf,1,1))
self.register_buffer('sqrs', torch.zeros(1,nf,1,1))
self.register_buffer('batch', tensor(0.))
self.register_buffer('count', tensor(0.))
self.register_buffer('step', tensor(0.))
self.register_buffer('dbias', tensor(0.))
def update_stats(self, x):
bs,nc,*_ = x.shape
self.sums.detach_()
self.sqrs.detach_()
dims = (0,2,3)
s = x.sum(dims, keepdim=True)
ss = (x*x).sum(dims, keepdim=True)
c = self.count.new_tensor(x.numel()/nc)
mom1 = 1 - (1-self.mom)/math.sqrt(bs-1)
self.mom1 = self.dbias.new_tensor(mom1)
self.sums.lerp_(s, self.mom1)
self.sqrs.lerp_(ss, self.mom1)
self.count.lerp_(c, self.mom1)
self.dbias = self.dbias*(1-self.mom1) + self.mom1
self.batch += bs
self.step += 1
def forward(self, x):
if self.training: self.update_stats(x)
sums = self.sums
sqrs = self.sqrs
c = self.count
if self.step<100:
sums = sums / self.dbias
sqrs = sqrs / self.dbias
c = c / self.dbias
means = sums/c
vars = (sqrs/c).sub_(means*means)
if bool(self.batch < 20): vars.clamp_min_(0.01)
x = (x-means).div_((vars.add_(self.eps)).sqrt())
return x.mul_(self.mults).add_(self.adds)
```
### Simplified RunningBatchNorm
```
class RunningBatchNorm(nn.Module):
def __init__(self, nf, mom=0.1, eps=1e-5):
super().__init__()
self.mom, self.eps = mom, eps
self.mults = nn.Parameter(torch.ones (nf,1,1))
self.adds = nn.Parameter(torch.zeros(nf,1,1))
self.register_buffer('sums', torch.zeros(1,nf,1,1))
self.register_buffer('sqrs', torch.zeros(1,nf,1,1))
self.register_buffer('count', tensor(0.))
self.register_buffer('factor', tensor(0.))
self.register_buffer('offset', tensor(0.))
self.batch = 0
def update_stats(self, x):
bs,nc,*_ = x.shape
self.sums.detach_()
self.sqrs.detach_()
dims = (0,2,3)
s = x .sum(dims, keepdim=True)
ss = (x*x).sum(dims, keepdim=True)
c = s.new_tensor(x.numel()/nc)
mom1 = s.new_tensor(1 - (1-self.mom)/math.sqrt(bs-1))
self.sums .lerp_(s , mom1)
self.sqrs .lerp_(ss, mom1)
self.count.lerp_(c , mom1)
self.batch += bs
means = self.sums/self.count
varns = (self.sqrs/self.count).sub_(means*means)
if bool(self.batch < 20): varns.clamp_min_(0.01)
self.factor = self.mults / (varns+self.eps).sqrt()
self.offset = self.adds - means*self.factor
def forward(self, x):
if self.training: self.update_stats(x)
return x*self.factor + self.offset
```
<a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title"><b>The Traveling Salesman Problem</b></span> by <a xmlns:cc="http://creativecommons.org/ns#" href="http://mate.unipv.it/gualandi" property="cc:attributionName" rel="cc:attributionURL">Stefano Gualandi</a> is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>.<br />Based on a work at <a xmlns:dct="http://purl.org/dc/terms/" href="https://github.com/mathcoding/opt4ds" rel="dct:source">https://github.com/mathcoding/opt4ds</a>.
# 4. The Traveling Salesman Problem
In this notebook, we show how to solve the *Student Ticket-Selling Problem* (known in the academic literature as the **Traveling Salesman Problem (TSP)**) by using Integer Linear Programming.
For a nice source of information about the TSP, please visit the [TSP website](http://www.math.uwaterloo.ca/tsp/).
The following lines are for running this notebook in a COLAB:
```
import shutil
import sys
import os.path
if not shutil.which("pyomo"):
!pip install -q pyomo
assert(shutil.which("pyomo"))
if not (shutil.which("glpk") or os.path.isfile("glpk")):
if "google.colab" in sys.modules:
!apt-get install -y -qq glpk-utils
else:
try:
!conda install -c conda-forge glpk
except:
pass
```
## 4.1 Introduction
A student from the University of Pavia must sell the tickets for the next post-Covid19 re-opening party, planned for March 1st, 2021. For this reason, she must visit all the $n$ residences of the city exactly once, and then return to her own residence.
The time taken to go from residence $i$ to residence $j$ is $c_{ij}$, and the visiting time is fixed for each residence. The student wants to find the visiting order that gets her back as soon as possible, so that she can study for the next, very challenging, exam on Optimization Models and Algorithms.
The input data are:
* The number of residences $n$ in Pavia, with a mapping of residences to indices $I=\{1,\dots,n\}$.
* The cost matrix $C$ with all the pairwise distances between the residences.
## 4.2 Primal Heuristic
Any permutation of the $n$ residences represents a feasible solution.
The residence locations are given as a list of pair of coordinates:
```
Ls = [(38.24, 20.42), (39.57, 26.15), (40.56, 25.32), (36.26, 23.12),
(33.48, 10.54), (37.56, 12.19), (38.42, 13.11), (37.52, 20.44),
(41.23, 9.10), (41.17, 13.05), (36.08, -5.21), (38.47, 15.13),
(38.15, 15.35), (37.51, 15.17), (35.49, 14.32), (39.36, 19.56)]
```
The cost matrix can be computed with the following function:
```
import numpy as np
from math import sqrt
def CostMatrix(Ls):
n = len(Ls)
    C = 100000*np.ones((n,n)) # Very high cost on the diagonal, to forbid staying at the same residence
for i, (a,b) in enumerate(Ls):
for j, (c,d) in enumerate(Ls[i+1:]):
C[i, i+j+1] = sqrt((a-c)**2 + (b-d)**2)
C[i+j+1, i] = C[i, i+j+1]
return C
Ls = [(38.24, 20.42), (39.57, 26.15), (40.56, 25.32), (36.26, 23.12),
(33.48, 10.54), (37.56, 12.19), (38.42, 13.11), (37.52, 20.44),
(41.23, 9.10), (41.17, 13.05), (36.08, -5.21), (38.47, 15.13),
(38.15, 15.35), (37.51, 15.17), (35.49, 14.32), (39.36, 19.56)]
C = CostMatrix(Ls)
```
Any permutation is a feasible solution (likely, far from being optimal):
```
tour = [(i,i+1) for i in range(len(Ls)-1)]
# From the last residence back to the first one
tour = tour + [(tour[-1][1],tour[0][0])]
tour
```
**EXERCISE 1:** Write a function to compute the cost of a given tour.
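One possible sketch of such a function (the name `TourCost` is ours, not part of the exercise statement):
```
def TourCost(C, tour):
    # A tour is a list of directed legs (i, j); its cost is the sum of the leg costs
    return sum(C[i, j] for i, j in tour)

TourCost(C, tour)
```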
## 4.3 Plotting Solutions
If the points can be displayed in a plane, we can use matplotlib to represent a (partial) solution of our problem:
```
def PlotTour(Ps, Ls):
# Report solution value
import matplotlib.pyplot as plt
import numpy as np
import pylab as pl
from matplotlib import collections as mc
lines = [[Ps[i], Ps[j]] for i,j in Ls]
lc = mc.LineCollection(lines, linewidths=2)
fig, ax = pl.subplots()
ax.add_collection(lc)
ax.autoscale()
ax.margins(0.1)
```
To plot a solution as a list of sequential pairs:
```
PlotTour(Ls, tour)
```
**QUESTION:** How far are we from the optimal value?
## 4.4 Integer Linear Programming Model
For each ordered pair $(i,j) \in I \times I, i \neq j$, we introduce a binary decision variable $x_{ij} \in \{0,1\}$, which indicates if the student travels from residence $i$ to $j$.
**EXERCISE 2:** Write a possible ILP model to solve this problem
```
# Complete the following code
def SolveTSP(C):
pass
```
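As a hedged sketch only (not necessarily the intended answer), one compact formulation is the Miller-Tucker-Zemlin model below; all names are illustrative and the model is solved with the GLPK solver installed at the top of the notebook:
```
from pyomo.environ import (ConcreteModel, Var, Objective, Constraint,
                           Binary, NonNegativeReals, SolverFactory, minimize)

def SolveTSP_MTZ(C):
    n = len(C)
    I = range(n)
    m = ConcreteModel()
    # x[i, j] = 1 if the student travels directly from residence i to residence j
    m.x = Var(I, I, domain=Binary)
    # MTZ ordering variables, used to forbid subtours
    m.u = Var(I, domain=NonNegativeReals, bounds=(0, n - 1))
    m.obj = Objective(expr=sum(C[i, j] * m.x[i, j] for i in I for j in I if i != j),
                      sense=minimize)
    # Leave and enter every residence exactly once
    m.leave = Constraint(I, rule=lambda m, i: sum(m.x[i, j] for j in I if j != i) == 1)
    m.enter = Constraint(I, rule=lambda m, j: sum(m.x[i, j] for i in I if i != j) == 1)
    # Subtour elimination (Miller-Tucker-Zemlin)
    def mtz_rule(m, i, j):
        if i == j or i == 0 or j == 0:
            return Constraint.Skip
        return m.u[i] - m.u[j] + n * m.x[i, j] <= n - 1
    m.mtz = Constraint(I, I, rule=mtz_rule)
    SolverFactory('glpk').solve(m)
    # Return the selected arcs as a list of pairs
    return [(i, j) for i in I for j in I if i != j and m.x[i, j].value > 0.5]

# tour = SolveTSP_MTZ(C)
# PlotTour(Ls, tour)
```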
**EXERCISE 3:** Debug your ILP model, please!
**EXERCISE 4:** Improve your model to provide a tighter LP relaxation at the root node.
## 4.5 Larger Instances
Once you have found a model that correctly solves the given TSP instance, try to solve the following larger instance, which contains the coordinates of a number of villages in Bavaria.
```
BAVIERA = [(1150.0, 1760.0), (630.0, 1660.0), (40.0, 2090.0), (750.0, 1100.0),
(1030.0, 2070.0), (1650.0, 650.0), (1490.0, 1630.0), (790.0, 2260.0),
(710.0, 1310.0), (840.0, 550.0), (1170.0, 2300.0), (970.0, 1340.0),
(510.0, 700.0), (750.0, 900.0), (1280.0, 1200.0), (230.0, 590.0),
(460.0, 860.0), (1040.0, 950.0), (590.0, 1390.0), (830.0, 1770.0),
(490.0, 500.0), (1840.0, 1240.0), (1260.0, 1500.0), (1280.0, 790.0),
(490.0, 2130.0), (1460.0, 1420.0), (1260.0, 1910.0), (360.0, 1980.0),
(750.0, 2030.0)]
```
If you can also solve this instance in a few seconds, you can then evaluate how your model scales with an increasing number of cities, using the following random instance generator:
```
def RandomTSP(n):
from numpy import random
return [(x,y) for x,y in zip(random.random(n), random.random(n))]
for n in [50, 75, 100, 150, 200]:
    Ls = RandomTSP(n)
# solve tsp with your model
```
Matrices
======
*Two-dimensional arrays*, or matrices, are data structures that represent a set of values referenced by the same name and individualized by their *row* and *column* position within that set. The Python language does not support multidimensional lists (lists with two or more dimensions) directly, but any table, including a matrix, can be represented as a list of lists (a list whose elements are themselves lists).
## Structure
Below is the structure of a matrix M of size `n x m`, with `n = 3` and `m = 5`, whose elements are denoted a*ij*, where `i` is the row index and `j` is the column index of the element in question:
| Matrix M | j = 0 | j = 1 | j = 2 | j = 3 | j = 4 |
|:--------: | :-----: | :-----: | :------: | :------: | :------: |
| **i = 0** | 8 | 2 | -8 | 4 | 0 |
| **i = 1** | -7 | 9 | 2 | -3 | 5 |
| **i = 2** | 6 | -5 | 10 | -6 | 1 |
Examples: `a12 = 2`, `a04 = 0` and `a10 = -7`...
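As a small illustration (this snippet is ours, not part of the original text), the matrix M above can be written and indexed directly in Python:
```
M = [[ 8,  2,  -8,  4, 0],
     [-7,  9,   2, -3, 5],
     [ 6, -5,  10, -6, 1]]

# M[i][j]: the first index selects the row, the second the column
print(M[1][2])   # 2
print(M[0][4])   # 0
print(M[1][0])   # -7
```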
## Creation
Below is an example program that creates a numeric table with two rows and three columns.
```
matriz = [[7, 3, 9], [2, 1, 4]]
# Note that each list contained inside the outer list is a row.
print(matriz[0])
print(matriz[1])
```
The first and second elements of our matrix, named `matriz` here - `matriz[0]` and `matriz[1]` - are both lists of numbers, `[7, 3, 9]` and `[2, 1, 4]` respectively. Their elements are: `matriz[0][0] == 7`, `matriz[0][1] == 3`, `matriz[0][2] == 9`, `matriz[1][0] == 2`, `matriz[1][1] == 1` and `matriz[1][2] == 4`.
Now suppose you are given two numbers, the number of rows `n` and of columns `m`, and you have to create an `n` × `m` matrix with every position filled with some value, for example 0:
```
n = 3      # Number of rows
m = 4      # Number of columns
valor = 0  # Value stored in every position of the matrix
minhaMatriz = []
for i in range(n):
    # Create an auxiliary list to hold the row values
    lista = []
    for j in range(m):
        # Append the value to the auxiliary list
        lista.append(valor)
    # Add the auxiliary row to the matrix
    minhaMatriz.append(lista)
print(minhaMatriz)
```
Or, more simply, you can use list multiplication as follows (it prints the same result, but note that all `n` rows end up referencing the same inner list - see the example right after the next cell):
```
minhaMatriz = [[valor] * m] * n
print(minhaMatriz)
```
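The caveat mentioned above can be seen directly (this example is ours, not from the original text): with the multiplication shortcut every row is the *same* list object, so a nested list comprehension is usually the safer construction:
```
aliased = [[valor] * m] * n
aliased[0][0] = 99
print(aliased)   # 99 shows up in every row, because all rows share one inner list

safe = [[valor for j in range(m)] for i in range(n)]
safe[0][0] = 99
print(safe)      # only the first row changes
```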
Since a matrix in Python is represented as a list, just like its elements, all **insertion**, **removal** and **manipulation** operations are the same as for an ordinary list.
## Iteration
To iterate over a two-dimensional list, nested loops are normally used. The most common pattern is one where the first loop runs over the row index and the second loop runs over the elements within that row. For example, this is how you print the two-dimensional numeric list on screen:
```
'''
Our matrix is:
[7, 3, 9]
[2, 1, 4]
'''
# Iterating over the matrix created in the first example
for linha in range(len(matriz)):
    print("Elementos da linha " + str(linha) + ": ")
    for coluna in range(len(matriz[linha])):
        print(matriz[linha][coluna])
```
But you can also iterate starting from the columns, as follows:
```
'''
Our matrix is:
[7, 3, 9]
[2, 1, 4]
'''
# Iterating over the matrix created in the first example
for coluna in range(len(matriz[0])):
    print("Elementos da coluna " + str(coluna) + ": ")
    for linha in range(len(matriz)):
        print(matriz[linha][coluna])
```
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
# AutoML: Regression with Local Compute
In this example we use the scikit-learn's [diabetes dataset](http://scikit-learn.org/stable/datasets/index.html#diabetes-dataset) to showcase how you can use AutoML for a simple regression problem.
Make sure you have executed the [00.configuration](00.configuration.ipynb) before running this notebook.
In this notebook you will learn how to:
1. Create an `Experiment` in an existing `Workspace`.
2. Configure AutoML using `AutoMLConfig`.
3. Train the model using local compute.
4. Explore the results.
5. Test the best fitted model.
## Create an Experiment
As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
```
import logging
import os
import random
from matplotlib import pyplot as plt
from matplotlib.pyplot import imshow
import numpy as np
import pandas as pd
from sklearn import datasets
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.train.automl import AutoMLConfig
from azureml.train.automl.run import AutoMLRun
ws = Workspace.from_config()
# Choose a name for the experiment and specify the project folder.
experiment_name = 'automl-local-regression'
project_folder = './sample_projects/automl-local-regression'
experiment = Experiment(ws, experiment_name)
output = {}
output['SDK version'] = azureml.core.VERSION
output['Subscription ID'] = ws.subscription_id
output['Workspace Name'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Project Directory'] = project_folder
output['Experiment Name'] = experiment.name
pd.set_option('display.max_colwidth', None)
pd.DataFrame(data = output, index = ['']).T
```
## Diagnostics
Opt-in diagnostics for better experience, quality, and security of future releases.
```
from azureml.telemetry import set_diagnostics_collection
set_diagnostics_collection(send_diagnostics = True)
```
### Load Training Data
This uses scikit-learn's [load_diabetes](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_diabetes.html) method.
```
# Load the diabetes dataset, a well-known built-in small dataset that comes with scikit-learn.
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
X, y = load_diabetes(return_X_y = True)
columns = ['age', 'gender', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
```
## Configure AutoML
Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.
|Property|Description|
|-|-|
|**task**|classification or regression|
|**primary_metric**|This is the metric that you want to optimize. Regression supports the following primary metrics: <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>|
|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|
|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|
|**n_cross_validations**|Number of cross validation splits.|
|**X**|(sparse) array-like, shape = [n_samples, n_features]|
|**y**|(sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]<br>Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|
|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|
```
automl_config = AutoMLConfig(task = 'regression',
iteration_timeout_minutes = 10,
iterations = 10,
primary_metric = 'spearman_correlation',
n_cross_validations = 5,
debug_log = 'automl.log',
verbosity = logging.INFO,
X = X_train,
y = y_train,
path = project_folder)
```
## Train the Models
Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.
In this example, we specify `show_output = True` to print currently running iterations to the console.
```
local_run = experiment.submit(automl_config, show_output = True)
local_run
```
## Explore the Results
#### Widget for Monitoring Runs
The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.
**Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details.
```
from azureml.widgets import RunDetails
RunDetails(local_run).show()
```
#### Retrieve All Child Runs
You can also use SDK methods to fetch all the child runs and see individual metrics that we log.
```
children = list(local_run.get_children())
metricslist = {}
for run in children:
properties = run.get_properties()
metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}
metricslist[int(properties['iteration'])] = metrics
rundata = pd.DataFrame(metricslist).sort_index(axis = 1)
rundata
```
### Retrieve the Best Model
Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.
```
best_run, fitted_model = local_run.get_output()
print(best_run)
print(fitted_model)
```
#### Best Model Based on Any Other Metric
Show the run and the model that has the smallest `root_mean_squared_error` value (which turned out to be the same as the one with largest `spearman_correlation` value):
```
lookup_metric = "root_mean_squared_error"
best_run, fitted_model = local_run.get_output(metric = lookup_metric)
print(best_run)
print(fitted_model)
```
#### Model from a Specific Iteration
Show the run and the model from the third iteration:
```
iteration = 3
third_run, third_model = local_run.get_output(iteration = iteration)
print(third_run)
print(third_model)
```
### Test the Best Fitted Model
Predict on training and test set, and calculate residual values.
```
y_pred_train = fitted_model.predict(X_train)
y_residual_train = y_train - y_pred_train
y_pred_test = fitted_model.predict(X_test)
y_residual_test = y_test - y_pred_test
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.metrics import mean_squared_error, r2_score
# Set up a multi-plot chart.
f, (a0, a1) = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[1, 1], 'wspace':0, 'hspace': 0})
f.suptitle('Regression Residual Values', fontsize = 18)
f.set_figheight(6)
f.set_figwidth(16)
# Plot residual values of training set.
a0.axis([0, 360, -200, 200])
a0.plot(y_residual_train, 'bo', alpha = 0.5)
a0.plot([-10,360],[0,0], 'r-', lw = 3)
a0.text(16,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_train, y_pred_train))), fontsize = 12)
a0.text(16,140,'R2 score = {0:.2f}'.format(r2_score(y_train, y_pred_train)), fontsize = 12)
a0.set_xlabel('Training samples', fontsize = 12)
a0.set_ylabel('Residual Values', fontsize = 12)
# Plot a histogram.
a0.hist(y_residual_train, orientation = 'horizontal', color = 'b', bins = 10, histtype = 'step');
a0.hist(y_residual_train, orientation = 'horizontal', color = 'b', alpha = 0.2, bins = 10);
# Plot residual values of test set.
a1.axis([0, 90, -200, 200])
a1.plot(y_residual_test, 'bo', alpha = 0.5)
a1.plot([-10,360],[0,0], 'r-', lw = 3)
a1.text(5,170,'RMSE = {0:.2f}'.format(np.sqrt(mean_squared_error(y_test, y_pred_test))), fontsize = 12)
a1.text(5,140,'R2 score = {0:.2f}'.format(r2_score(y_test, y_pred_test)), fontsize = 12)
a1.set_xlabel('Test samples', fontsize = 12)
a1.set_yticklabels([])
# Plot a histogram.
a1.hist(y_residual_test, orientation = 'horizontal', color = 'b', bins = 10, histtype = 'step')
a1.hist(y_residual_test, orientation = 'horizontal', color = 'b', alpha = 0.2, bins = 10)
plt.show()
```
# Intro to Jupyter Notebooks
### `Jupyter` is a project for developing open-source software
### `Jupyter Notebooks` is a `web` application to create scripts
### But it is more than that
#### It lets you insert and save text, equations & visualizations ... all on the same page!

## From here forward, we will practice as we go
<div class="alert alert-block alert-info"><b>Instructions</b> are highlighted in blue</div>
# Notebook dashboard
When you launch the Jupyter notebook server on your computer, you will see a dashboard like this:

<div class="alert alert-block alert-info">Create a new script by clicking on the <b> New</b> button</div>
# Rename a script
At the top of the page, next to the Jupyter logo at the top-left, is the name of the script (no extension). Clicking on the name brings up a pop-up window that lets you rename the file.
# Saving your own script
All the scripts we are showing here today are running online, & we will make changes throughout the workshop. To keep your modified script for future reference, you will need to save a copy on your own computer at the end.
<div class="alert alert-block alert-info">
<b>Try it right now! </b>
- Go to <b>File</b> in the top menu -> Download As -> Notebook </div>
<br>
Any changes made online, even if saved (but not downloaded), will be lost once the binder connection is closed.
***
## Two type of cells
### `Code` Cells: execute code
### `Markdown` Cells: show formated text
There are two ways to change the type of a cell:
- Clicking on the drop-down menu at the top
- Using the shortcuts `Esc-y` for code and `Esc-m` for markdown
<br>
<div class="alert alert-block alert-info"><b>Try it out! </b>
<br>- Click on the next cell
<br>- Change the type using the drop-down menu & select <b>Code</b>
<br>- Change it back to <b>Markdown</b>
</div>
## This is a simple operation
y = 4 + 6
print(y)
## <i>Note the change in format of the first line & the text color in the second line</i>
<div class="alert alert-block alert-info"><b>Try again in the next cell</b>
<br>- Double-Click on the next cell
<br>- Press <b>Esc</b> (note the blue color of the left border)
<br>- Type <b>y</b> to change it to <b>Code</b> type
<br>- Use <b>m</b> to change it back to <b>Markdown</b> type
</div>
```
# This is a simple operation
y = 4 + 6
print(y)
```
***
# To execute commands
## - `Shift-Enter` : executes the cell & advances to the next one
## - `Control-Enter` : executes the cell & stays in the same cell
<div class="alert alert-block alert-info"><b>Try it in the previous cell</b>
<br>- Double-Click on the previous cell
<br>- Use <b>Shift-Enter</b> to execute
<br>- Double-Click on the in the previous cell again
<br>- This time use <b>Control-Enter</b> to execute
<br>
<br>- Now change the type to <b>Code</b> & execute the cell
</div>
## You can also execute a single cell using the `Run` button in the top menu
## Or even the entire script from the `Cell` menu at the top
***
## Other commands
### From the icon menu:
### Save, Add Cell, Cut Cell, Copy Cell, Paste Cell, Move Cell Up, Move Cell Down

### or the drop down menu 'command palette'
<div class="alert alert-block alert-info"><b>Try them out!</b>
## Now, the keyboard shortcuts
#### First press `Esc`, then:
- `s` : save changes
<br>
- `a`, `b` : create cell above and below
<br>
- `dd` : delete cell
<br>
- `x`, `c`, `v` : cut, copy and paste cell
<br>
- `z` undo last change
<div class="alert alert-block alert-info">
<b> Let's practice!</b>
<br>- Create a cell below with <b>Esc-b</b>, and click on it
<br>- Type print('Hello world!') and execute it using <b>Control-Enter</b>
<br>- Copy-paste the cell to make a duplicate by typing <b>Esc-c</b> & <b>Esc-v</b>
<br>- Cut the first cell using <b>Esc-x</b>
</div>
## And another one
- `Esc-l` : toggle line numbers
<div class="alert alert-block alert-info">
<b>- Try it out in the cell below!</b>
<br>
- And now try it in the markdown cell
</div>
```
y = 5
print(y + 4)
x = 8
print(y*x)
```
***
## Last note about the `Kernel`
#### That little program that is running in the background & let you run your notebook
<div class="alert alert-block alert-danger">
Once in a while the <b>kernel</b> will die or your program will get stuck, & like everything else in the computer world... you'll have to restart it.
</div>
### You can do this by going to the `Kernel` menu -> Restart, & then you'll have to re-run all your cells (or at least the ones above the one you're working on; use the `Cell` menu -> Run All Above).
Import the libraries, as always, and read in the data
```
from galyleo.galyleo_table import GalyleoTable
from galyleo.galyleo_constants import GALYLEO_STRING, GALYLEO_NUMBER
from galyleo.galyleo_jupyterlab_client import GalyleoClient
import csv
def state_code(current_code):
if current_code == '': return ''
canada_codes = {'ab', 'bc', 'sk', 'mn', 'on', 'qc', 'pe', 'nb', 'ns', 'nl', 'nt', 'nu', 'yt'}
country = 'CA' if current_code in canada_codes else 'US'
return f'{country}-{current_code.upper()}'
def cleanse_row(row):
values = [entry.strip() for entry in row]
return [int(values[i]) for i in range(3) ] + values[3:5] + [state_code(values[5]), values[6], float(values[7])]
# read the file and make a table
with open('ufos.csv', 'r') as ufo_file:
reader = csv.reader(ufo_file)
column_names = next(reader)
data = [cleanse_row(row) for row in reader]
ufo_file.close()
data[:10]
column_names = [name.strip() for name in column_names]
schema = [(column_names[i], GALYLEO_NUMBER) for i in range(3)] + [(column_names[i], GALYLEO_STRING) for i in range(3,7)] + [(column_names[7], GALYLEO_STRING)]
table = GalyleoTable('ufos')
table.load_from_schema_and_data(schema, data)
```
Aggregate by year, month, country
```
sightings_by_country_year_month = table.aggregate_by([ 'country', 'year', 'month'], 'count', 'aggregate_cym')
```
Create a Dashboard using the Launcher or the File menu, then execute the next cell to send the data to it
```
client = GalyleoClient()
client.send_data_to_dashboard(sightings_by_country_year_month)
```
Aggregate by year and country
```
sightings_by_country_year = table.aggregate_by( [ 'country', 'year', ], 'count', 'aggregate_cy')
client.send_data_to_dashboard(sightings_by_country_year)
```
Use this to get North American sightings
```
north_american_sightings = table.filter_by_function("country", lambda x: x in {'ca', 'us'}, 'north_america_table')
```
Aggregate by state, year, month and by state and year
```
sightings_by_state_year_month = north_american_sightings.aggregate_by([ 'state', 'year', 'month'], 'count', 'aggregate_sym')
sightings_by_state_year = north_american_sightings.aggregate_by([ 'state', 'year'], 'count', 'aggregate_sy')
client.send_data_to_dashboard(sightings_by_state_year)
client.send_data_to_dashboard(sightings_by_state_year_month)
```
Aggregate by state, year, type and by country, year, type
```
sightings_by_state_year_type = north_american_sightings.aggregate_by([ 'state', 'year', 'type'], 'count', 'aggregate_syt')
sightings_by_country_year_type = table.aggregate_by([ 'country', 'year', 'type'], 'count', 'aggregate_cyt')
client.send_data_to_dashboard(sightings_by_state_year_type)
client.send_data_to_dashboard(sightings_by_country_year_type)
```
```
10 + 10
'Fernanda'
```
# Importing the data
In this immersion we are going to dive into the world of biology and biotechnology and explore a dataset from that field.
For our analysis we are still missing the data. To get it, we will go to GitHub, at this link:
https://github.com/alura-cursos/imersaodados3/tree/main/dados
Now let's import this dataset into our notebook. To bring this information together we will use the well-known "Pandas" library.
We import the library with the following code:
```
import pandas as pd
url_dados = 'https://github.com/alura-cursos/imersaodados3/blob/main/dados/dados_experimentos.zip?raw=true'
dados = pd.read_csv(url_dados, compression = 'zip')
dados
```
Note that we have now printed every row and column of our table.
Here we can see data such as ID, treatment (`tratamento`), time (`tempo`), dose, among others.
To help us visualize and identify the columns we will work with, let's take a slice of the data, a kind of header with 5 rows, using the following command:
```
dados.head()
```
When we were working with the full dataset we knew the total number of rows and columns. With head, however, we lose that information. If you want to recover the total number of rows and columns, just use:
```
dados.shape
```
Now let's get back to our problem.
We want to start by analyzing the data we are dealing with. Let's begin by selecting a single column so we can understand what it is about.
We start with the `tratamento` (treatment) column. To do that we type:
```
dados['tratamento']
```
This kind of single-column object is what we call a 'Series', while the whole table is called a 'DataFrame'.
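As a quick check (a small aside, not part of the original lesson), Python's `type()` makes this distinction visible:
```
# A single column is a pandas Series; the whole table is a DataFrame
print(type(dados['tratamento']))
print(type(dados))
```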
Now, to see the distinct values we have inside this column, we type:
```
dados['tratamento'].unique()
```
The answer comes back as an array, which is nothing more than a vector, i.e. a kind of data structure.
Note that we find two values: 'com_droga' (with drug) and 'com_controle' (with control).
"With drug", as the name says, means some kind of drug was applied to the sample. "With control" refers to a statistical technique in which we isolate the other variables and observe only the variable of interest.
Now let's analyze the time column:
```
dados['tempo'].unique()
```
We find 3 distinct values in this column: 24, 72 and 48, which likely indicate the interval, probably in hours, at which the dose of medication or drug was administered.
It is useful to observe how the cells behave across these different time spans, since analyzing over a different period might not give the cells enough time to express a given behavior.
Besides time, we can also analyze the dose column:
```
dados['dose'].unique()
```
Here we get two different doses, D1 and D2, but we cannot state anything categorically up front.
Let's then analyze the drug column:
```
dados['droga'].unique()
```
With the output of ```dados['droga'].unique()``` we get a series of codes. These numbers were probably encoded to anonymize the drugs used and to avoid any kind of bias in the analysis of the results.
There is a set of rules for running such experiments that avoids exposing data such as name, sex and other factors, depending on the analysis to be done, so that biases are avoided.
Now let's analyze the column named g-0:
```
dados['g-0'].unique()
```
Just by looking, it is hard to deduce what these numbers represent. At this point, with the help of Vanessa, the domain specialist, we learn that the letter 'g' in the column name stands for gene. In other words, these numbers tell us the expression of each gene in response to the drugs or to the exposure.
Scrolling back up the table, we see many different values, some with several decimal places. Apparently these numbers were "rounded", i.e. normalized, so that we can compare them in some way.
So far in our analysis we have already identified and understood the information in several columns: the columns that describe the treatments, the drugs, the time, and then the gene responses, whose names start with the letter g.
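To get a feel for that normalized range (a quick check that is not part of the original lesson), pandas' `describe()` summarizes the column:
```
# Summary statistics of the g-0 expression values; min and max show the bounded, normalized range
dados['g-0'].describe()
```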
Another kind of analysis we can do now is to understand the distribution of this information. For example, we can find out how many experiments use a drug and how many do not; how many use D1 and how many use D2, and so on.
How can we do that? We use this code:
```
dados['tratamento'].value_counts()
```
This time, instead of using ```.unique``` in the code, we use ```.value_counts```, since our goal is to count how many times each value appears in the column, in this case the ```tratamento``` column.
The result comes back as 2 rows: one with the count, that is, the frequency, of the ```com_droga``` category, and the other with the count of the ```com_controle``` category.
There is a big difference between these two categories, isn't there? That is, at the very least, curious. So Guilherme suggests a challenge: investigate why this difference is so large.
At this point Thiago writes two more challenges, challenge #2 and challenge #3, and Vanessa also leaves hers, challenge #4. All the challenges from this lesson are written at the end of this notebook.
Does the imbalance between values within a category stay this large elsewhere? Let's investigate the ```dose``` column:
```
dados['dose'].value_counts()
```
Here things already look more balanced. But from the raw counts alone it is hard to do a deeper reading. So at this point we will solve one of the challenges that was left. If you have not yet solved the challenges from the video and do not want a spoiler, stop here and try to solve them first.
To understand the data better and make comparisons, it helps to look at the proportions. Let's write the code now, passing ```normalize = True``` as a parameter inside the parentheses:
```
dados['tratamento'].value_counts(normalize = True)
```
We now have the "normalized" counts. We can read them as percentages (multiplying by 100), which gives roughly 92% versus 8%, showing how large the imbalance is.
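As a small illustration (an extra step, not from the original lesson), the same proportions can be printed directly as percentages:
```
# Same proportions expressed as percentages
dados['tratamento'].value_counts(normalize=True) * 100
```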
Let's now do the same with the ```dose``` column:
```
dados['dose'].value_counts(normalize = True)
```
Here is the result. As a mini-challenge, you can apply the same process to the other columns.
Right, but usually when this kind of information is communicated we see a series of charts, and very often it is that chart that looks like a tart, or a pizza.
So let's plot a chart of that type with the following code:
```
dados['tratamento'].value_counts().plot.pie()
```
Here, in blue, we have the amount of treatment with drugs (which is much larger, in proportion), and in orange the "with control" treatment. It even ended up looking like a Pac-Man, heh.
Let's take the opportunity to analyze the other columns with charts as well and see what happens. Let's look at the time column:
```
dados['tempo'].value_counts().plot.pie()
```
Notice how hard it is, with the naked eye, to tell from this chart which value is the largest. This type of chart can make the analysis harder, especially when the categories are better balanced.
Beyond identifying the observed time values, we cannot extract any further information from this chart. In fact, we have a kind of rule of thumb that, when doing any kind of analysis, we should avoid charts that remind us of food, for example: donut, pizza, pie, and so on.
So what type of chart could we use to improve the visualization? Let's use a bar chart, with this code:
```
dados['tempo'].value_counts().plot.bar()
```
Now we can far more easily identify which value has the highest frequency. In fact, the number of hours with the largest number of observations is 48.
On the y axis we have the number of observations, so we can see that there are a little over 8000 observations for 48 hours.
So the bar chart turned out to be much more useful, in this case, than the pie chart.
Throughout the lesson we talked about gene expression. If we go back to the table, in the g-0 column we have values within a defined interval. To avoid values that are too far apart, it is quite common in science to normalize the results, creating an interval that is not too wide and whose distribution is centered at 0.
How can we find out in which rows of this column the value is above 0?
We can run a query on our data like this:
```
dados_filtrados = dados[dados['g-0'] > 0]
dados_filtrados.head()
```
This way we get only the first 5 rows whose values in the g-0 column are greater than 0, with the help of the mask ```[dados['g-0']>0]```.
In the same way that we applied this mask, we can follow the same path, or use other queries, to answer many other questions.
This gives us another challenge, #5: look up the `query` method in the pandas documentation.
Besides that one there are more challenges, #6, #7 and #8, all of them listed right below.
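As one possible sketch for challenge #5 (spoiler warning: this is just one way to use the method, not the official answer), `DataFrame.query` can express the same filter; the backticks are needed because the column name contains a hyphen (supported since pandas 0.25). The variable name below is illustrative.
```
# Equivalent of the mask dados[dados['g-0'] > 0], written with DataFrame.query
# Backticks let query reference the hyphenated column name g-0 (pandas >= 0.25)
dados_filtrados_query = dados.query('`g-0` > 0')
dados_filtrados_query.head()
```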
```
dados
```

# Train POS Tagger in French by Spark NLP
### Based on Universal Dependency `UD_French-GSD` version 2.3
### Spark `2.4` and Spark NLP `2.0.1`
```
import sys
sys.path.append('../../')
import time
#Spark ML and SQL
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql.functions import array_contains
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
#Spark NLP
from sparknlp.annotator import *
from sparknlp.common import RegexRule
from sparknlp.base import DocumentAssembler, Finisher
```
### Let's create a Spark Session for our app
```
spark = SparkSession.builder \
.appName("Training_Perceptron")\
.master("local[*]")\
.config("spark.driver.memory","6G")\
.config("spark.driver.maxResultSize", "2G")\
.config("spark.jars", "/tmp/sparknlp.jar")\
.config("spark.driver.extraClassPath", "/tmp/sparknlp.jar")\
.config("spark.executor.extraClassPath", "/tmp/sparknlp.jar")\
.config("spark.kryoserializer.buffer.max", "500m")\
.getOrCreate()
spark.version
```
Let's prepare our training dataset containing `token_posTag` pairs like `de_DET`. You can download this dataset from Amazon S3:
```
wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp
```
```
# Download CoNLL-U French-GSD already converted to token_tag
# Download CoNLL 2003 Dataset
import os
from pathlib import Path
import urllib.request
url = 'https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/'
file_train='UD_French-GSD_2.3.txt'
full_path='/tmp/'+file_train
if not Path(full_path).is_file():
print('Downloading '+file_train)
urllib.request.urlretrieve(url+file_train, full_path)
from sparknlp.training import POS
training_data = POS().readDataset(spark, '/tmp/UD_French-GSD_2.3.txt', '_', 'tags')
training_data.show()
document_assembler = DocumentAssembler() \
.setInputCol("text")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")\
.addInfixPattern("(\\w+)([^\\s\\p{L}]{1})+(\\w+)")\
.addInfixPattern("(\\w+'{1})(\\w+)")\
.addInfixPattern("(\\p{L}+)(n't\\b)")\
.addInfixPattern("((?:\\p{L}\\.)+)")\
.addInfixPattern("([\\$#]?\\d+(?:[^\\s\\d]{1}\\d+)*)")\
.addInfixPattern("([\\p{L}\\w]+)")
posTagger = PerceptronApproach() \
.setNIterations(6) \
.setInputCols(["sentence", "token"]) \
.setOutputCol("pos") \
.setPosCol("tags")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
posTagger
])
%%time
# Let's train our Pipeline by using our training dataset
model = pipeline.fit(training_data)
```
This is our testing DataFrame where we get some sentences in French. We are going to use our trained Pipeline to transform these sentences and predict each token's `Part Of Speech`.
```
dfTest = spark.createDataFrame([
"Je sens qu'entre ça et les films de médecins et scientifiques fous que nous avons déjà vus, nous pourrions emprunter un autre chemin pour l'origine.",
"On pourra toujours parler à propos d'Averroès de décentrement du Sujet."
], StringType()).toDF("text")
predict = model.transform(dfTest)
predict.select("token.result", "pos.result").show()
```
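As an optional follow-up (not part of the original notebook), the fitted pipeline can be persisted with Spark ML's standard writer so it can be reloaded later without retraining; the output path below is just an example.
```
# Persist the fitted PipelineModel (example path; adjust to your environment)
model.write().overwrite().save("/tmp/trained_pos_fr_pipeline")

# Reload it later without retraining:
# from pyspark.ml import PipelineModel
# loaded_model = PipelineModel.load("/tmp/trained_pos_fr_pipeline")
```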
# LIP LANDMARKS EXPERIMENTS
```
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import tqdm
from sklearn.externals import joblib
import sys
sys.path.append('../')
import utils
kp = sio.loadmat('vid_kp_concat_keys.mat')['vidKp']
kp.shape
lip_lms = np.reshape(kp, (-1, 2, 20))
lip_lms.shape
plt.scatter(lip_lms[0][0], -lip_lms[0][1])
lip_lms_wrong = np.reshape(kp, (-1, 20, 2))
lip_lms_wrong.shape
plt.scatter(lip_lms_wrong[0][:, 0], -lip_lms_wrong[0][:, 1])
plt.scatter(np.mean(lip_lms[:, 0], axis=0), -np.mean(lip_lms[:, 1], axis=0))
plt.scatter(np.std(lip_lms[:, 0], axis=0), -np.std(lip_lms[:, 1], axis=0))
```
## Eigenvalues & Eigenvectors
```
def find_eigenvalues_and_eigenvectors_simply(A):
L = 1 / len(A.T) * np.dot(A, A.T)
e, u = np.linalg.eig(L)
w = e
v = np.dot(A.T, u)
return w, v
def find_eigenvalues_and_eigenvectors(A):
return np.linalg.eig(1 / len(A) * np.dot(A.T, A))
eigenvalues, eigenvectors = find_eigenvalues_and_eigenvectors(kp)
print(eigenvalues.shape, eigenvectors.shape)
print(eigenvectors)
# Number of eigenfaces to be plotted
N = 40
plt.figure(figsize=(15, 2*(N+5)//5))
for i in range(N):
# Make a subplot
plt.subplot((N + 5)//5, 5, i+1)
# Remember eigenvectors are **columns** in the matrix
plt.scatter(np.reshape(eigenvectors[:, i].T, (2, 20))[0], -np.reshape(eigenvectors[:, i].T, (2, 20))[1])
plt.title(i)
# plt.axis('off')
# Plot r vs M
# Values of M to consider: 1, 2,..., n
M = np.array(range(1, len(eigenvalues) + 1))
# Calculate r for all values of M
r = np.cumsum(eigenvalues)/np.sum(eigenvalues)
# Plot r vs M
plt.plot(M, r)
plt.xlabel("M", fontsize=20)
plt.ylabel("r", fontsize=20)
plt.grid("on")
plt.show()
```
# EXP 0. Remove landmarks that are all 0
```
count = 0
zero_i = []
for i, lm in tqdm.tqdm(enumerate(lip_lms), total=len(lip_lms)):
if np.sum(lm) == 0:
zero_i.append(i)
count += 1
print(count)
lip_lms_wo_0 = np.delete(lip_lms, zero_i, axis=0)
lip_lms_wo_0.shape
```
# EXP 1. Align landmarks, set left at (-1, 0) and right at (1, 0)
```
def align_lm(lm):
angle = np.arctan((lm[1, 6] - lm[1, 0])/(lm[0, 6] - lm[0, 0] + 1e-8))
rot_lm = np.dot([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]], lm)
aligned_lm = (rot_lm - rot_lm[:, 0].reshape(2, 1)) / (np.max(rot_lm[0]) - np.min(rot_lm[0]) + 1e-8) * 2 - np.array([[1], [0]])
aligned_lm[aligned_lm > 1.] = 1.
aligned_lm[aligned_lm < -1.] = -1.
return aligned_lm
aligned_lms_wo_0 = []
for lm in tqdm.tqdm(lip_lms_wo_0):
aligned_lms_wo_0.append(align_lm(lm))
aligned_lms_wo_0 = np.array(aligned_lms_wo_0)
# # np.save('vid_kp_concat_keys_aligned', aligned_lms)
# sio.savemat('vid_kp_concat_keys_aligned_wo_0.mat', {'vidKp': aligned_lms_wo_0.reshape(-1, 40)})
aligned_lms_wo_0 = sio.loadmat('vid_kp_concat_keys_aligned_wo_0.mat')['vidKp'].reshape(-1, 2, 20)
aligned_lms = []
for lm in tqdm.tqdm(lip_lms):
aligned_lms.append(align_lm(lm))
aligned_lms = np.array(aligned_lms)
# # np.save('vid_kp_concat_keys_aligned', aligned_lms)
# sio.savemat('vid_kp_concat_keys_aligned.mat', {'vidKp': aligned_lms.reshape(-1, 40)})
aligned_lms = sio.loadmat('vid_kp_concat_keys_aligned.mat')['vidKp'].reshape(-1, 2, 20)
```
### Find eigenvectors and eigenvalues
```
eigenvalues_aligned, eigenvectors_aligned = find_eigenvalues_and_eigenvectors(aligned_lms.reshape(-1, 40))
# Number of eigenfaces to be plotted
N = 40
plt.figure(figsize=(15, 2*(N+5)//5))
for i in range(N):
# Make a subplot
plt.subplot((N + 5)//5, 5, i+1)
# Remember eigenvectors are **columns** in the matrix
plt.scatter(np.reshape(eigenvectors_aligned[:, i].T, (2, 20))[0], np.reshape(eigenvectors_aligned[:, i].T, (2, 20))[1])
plt.title(i)
# plt.axis('off')
# Plot r vs M
# Values of M to consider: 1, 2,..., n
M = np.array(range(1, len(eigenvalues_aligned) + 1))
# Calculate r for all values of M
r = np.cumsum(eigenvalues_aligned)/np.sum(eigenvalues_aligned)
# Plot r vs M
plt.plot(M, r)
plt.xlabel("M", fontsize=20)
plt.ylabel("r", fontsize=20)
plt.grid("on")
plt.show()
# Number of lips to be plotted
N = 100
plt.figure(figsize=(15, 2*(N+5)//5))
for i in range(N):
k = np.random.choice(len(aligned_lms))
# Make a subplot
plt.subplot((N + 5)//5, 5, i+1)
plt.scatter(aligned_lms[k][0], -aligned_lms[k][1])
plt.title(k, fontweight='bold')
# plt.axis('off')
plt.scatter(np.mean(aligned_lms[:, 0], axis=0), -np.mean(aligned_lms[:, 1], axis=0))
plt.scatter(np.std(aligned_lms[:, 0], axis=0), -np.std(aligned_lms[:, 1], axis=0))
```
# EXP 2: Cluster aligned landmarks
```
aligned_lms_wo_0.shape
# Reshape so that each row holds all 20 landmarks (x and y flattened to 40 values)
aligned_lms_wo_0_reshaped = np.reshape(aligned_lms_wo_0, (len(aligned_lms_wo_0), -1))
aligned_lms_wo_0_reshaped.shape
# Choose 10000 samples to fit on
fit_num = 10000
np.random.seed(29)
random_choice = np.random.choice(len(aligned_lms_wo_0_reshaped), fit_num, replace=False)
np.save('random_choice', random_choice)
random_choice = np.load('random_choice.npy')
random_choice
```
## Visualize landmarks using t-SNE
```
from sklearn.manifold import TSNE  # needed for the t-SNE embedding below

random_lip_landmarks_raw = aligned_lms_wo_0_reshaped[random_choice]
%time random_lip_landmarks_raw_tsne = TSNE(n_components=2, verbose=1).fit_transform(random_lip_landmarks_raw)
np.save('random_lip_landmarks_raw', random_lip_landmarks_raw)
# All 10000 t-SNE points
plt.scatter(random_lip_landmarks_raw_tsne[:, 0], random_lip_landmarks_raw_tsne[:, 1], s=3)
display_num = 1000
np.random.seed(29)
random_choice_to_display = np.random.choice(len(random_lip_landmarks_raw_tsne), display_num, replace=False)
np.save('random_choice_to_display', random_choice_to_display)
# np.load('random_choice_to_display.npy')
random_lip_landmarks_raw_tsne_to_display = random_lip_landmarks_raw_tsne[random_choice_to_display]
plt.scatter(random_lip_landmarks_raw_tsne_to_display[:, 0], random_lip_landmarks_raw_tsne_to_display[:, 1], s=3)
```
Comparison of clustering algorithms - http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html#sphx-glr-auto-examples-cluster-plot-cluster-comparison-py
## 1. Apply Spectral Clustering
Spectral Clustering - http://scikit-learn.org/stable/modules/clustering.html#spectral-clustering
```
import sklearn.cluster
spectral_cluster_params = {
'n_clusters' : 18,
'eigen_solver' : None,
'affinity' : 'nearest_neighbors',
'n_neighbors' : 10,
'assign_labels' : 'discretize'
}
spectral = sklearn.cluster.SpectralClustering(n_clusters=spectral_cluster_params['n_clusters'],
eigen_solver=spectral_cluster_params['eigen_solver'],
affinity=spectral_cluster_params['affinity'],
n_neighbors=spectral_cluster_params['n_neighbors'],
assign_labels=spectral_cluster_params['assign_labels'])
# Fit
%time spectral.fit(aligned_lms_wo_0_reshaped[random_choice])
# Save cluster
joblib.dump(spectral, 'spectral_cluster_of_aligned_lip_lms.pkl', compress=3)
# Predict labels
spectral_labels = spectral.labels_.astype(np.int)
unique_spectral_labels = np.unique(spectral_labels)
print(unique_spectral_labels)
# Cluster centres
spectral_cluster_centers = []
for i in range(len(unique_spectral_labels)):
spectral_cluster_centers.append(np.mean(aligned_lms_wo_0_reshaped[random_choice][spectral_labels == i], axis=0))
spectral_cluster_centers = np.array(spectral_cluster_centers)
# np.save('spectral_cluster_centers', spectral_cluster_centers)
spectral_cluster_centers = np.load('spectral_cluster_centers.npy')
fig = plt.figure(figsize=(15, 5))
for i in range(18):
ax = fig.add_subplot(3, 6, i+1)
c = spectral_cluster_centers[i].reshape(2, 20)
plt.scatter(c[0], -c[1])
```
## Visualize landmarks clusters using t-SNE
```
# Plot tSNE clusters
random_spectral_labels = spectral_labels[random_choice_to_display]
random_spectral_cluster_centers = []
for i in range(len(unique_spectral_labels)):
random_spectral_cluster_centers.append(np.mean(random_lip_landmarks_raw_tsne_to_display[random_spectral_labels == i], axis=0))
random_spectral_cluster_centers = np.array(random_spectral_cluster_centers)
plt.scatter(random_lip_landmarks_raw_tsne_to_display[:, 0], random_lip_landmarks_raw_tsne_to_display[:, 1], s=3, c=random_spectral_labels)
plt.scatter(random_spectral_cluster_centers[:, 0], random_spectral_cluster_centers[:, 1], s=15, c='r')
```
## Convert lip landmarks to cluster labels
```
spectral_dists = []
for spectral_cluster_center in tqdm.tqdm(spectral_cluster_centers):
spectral_dists.append(np.linalg.norm(aligned_lms.reshape(-1, 40) - spectral_cluster_center.reshape(1, 40), axis=1))
spectral_cluster_labels_aligned_lms = np.argmin(np.array(spectral_dists), axis=0)
np.save('spectral_cluster_labels_aligned_lms', spectral_cluster_labels_aligned_lms)
```
## 2. Apply Gaussian Mixture Model
GMM - http://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html#sklearn.mixture.GaussianMixture
```
import sklearn.mixture
gmm_params = {
'n_clusters' : 18,
'covariance_type' : 'full',
}
gmm = sklearn.mixture.GaussianMixture(n_components=gmm_params['n_clusters'],
covariance_type=gmm_params['covariance_type'])
%time gmm.fit(aligned_lms_wo_0_reshaped[random_choice])
# Save cluster
joblib.dump(gmm, 'gaussian_mixture_of_aligned_lip_lms.pkl', compress=3)
# Predict labels
gmm_labels = gmm.predict(aligned_lms_wo_0_reshaped[random_choice]).astype(np.int)
unique_gmm_labels = np.unique(gmm_labels)
print(unique_gmm_labels)
# Cluster centres
gmm_cluster_centers = []
for i in range(len(unique_gmm_labels)):
gmm_cluster_centers.append(np.mean(aligned_lms_wo_0_reshaped[random_choice][gmm_labels == i], axis=0))
gmm_cluster_centers = np.array(gmm_cluster_centers)
np.save('gmm_cluster_centers', gmm_cluster_centers)
gmm_cluster_centers = np.load('gmm_cluster_centers.npy')
fig = plt.figure(figsize=(15, 5))
for i in range(18):
ax = fig.add_subplot(3, 6, i+1)
c = gmm_cluster_centers[i].reshape(2, 20)
plt.scatter(c[0], -c[1])
```
## Visualize landmarks clusters using t-SNE
```
# Plot tSNE clusters
random_gmm_labels = gmm_labels[random_choice_to_display]
random_gmm_cluster_centers = []
for i in range(len(unique_gmm_labels)):
random_gmm_cluster_centers.append(np.mean(random_lip_landmarks_raw_tsne_to_display[random_gmm_labels == i], axis=0))
random_gmm_cluster_centers = np.array(random_gmm_cluster_centers)
plt.scatter(random_lip_landmarks_raw_tsne_to_display[:, 0], random_lip_landmarks_raw_tsne_to_display[:, 1], s=3, c=random_gmm_labels)
plt.scatter(random_gmm_cluster_centers[:, 0], random_gmm_cluster_centers[:, 1], s=15, c='r')
```
## Convert lip landmarks to cluster labels
```
gmm_dists = []
for gmm_cluster_center in tqdm.tqdm(gmm_cluster_centers):
gmm_dists.append(np.linalg.norm(aligned_lms.reshape(-1, 40) - gmm_cluster_center.reshape(1, 40), axis=1))
gmm_cluster_labels_aligned_lms = np.argmin(np.array(gmm_dists), axis=0)
np.save('gmm_cluster_labels_aligned_lms', gmm_cluster_labels_aligned_lms)
# Save as one-hot-encoded
gmm_cluster_labels_aligned_lms_one_hot_encoded = np.eye(18)[gmm_cluster_labels_aligned_lms]
np.save('gmm_cluster_labels_aligned_lms_one_hot_encoded', gmm_cluster_labels_aligned_lms_one_hot_encoded)
```
# SCRATCHPAD
## Alignment
```
angles = []
for lm in tqdm.tqdm(lip_lms):
angle = np.arctan((lm[1, 6] - lm[1, 0])/(lm[0, 6] - lm[0, 0] + 1e-8))
angles.append(angle)
# print(angle)
# break
angles = np.array(angles)
angles.shape
print(angles[0])
rot_lm = np.dot([[np.cos(angles[0]), np.sin(angles[0])], [-np.sin(angles[0]), np.cos(angles[0])]], lip_lms[0])
print(np.arctan((-rot_lm[1, 6] + rot_lm[1, 0])/(rot_lm[0, 6] - rot_lm[0, 0] + 1e-8)))
plt.scatter(rot_lm[0], -rot_lm[1])
aligned_lm = (rot_lm - rot_lm[:, 0].reshape(2, 1)) / (np.max(rot_lm[0]) - np.min(rot_lm[0])) * 2 - np.array([[1], [0]])
plt.scatter(aligned_lm[0], -aligned_lm[1])
def align_lm(lm):
angle = np.arctan((lm[1, 6] - lm[1, 0])/(lm[0, 6] - lm[0, 0] + 1e-8))
rot_lm = np.dot([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]], lm)
aligned_lm = (rot_lm - rot_lm[:, 0].reshape(2, 1)) / (np.max(rot_lm[0]) - np.min(rot_lm[0])) * 2 - np.array([[1], [0]])
return aligned_lm
```
### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset.
school_data_complete = pd.merge(student_data, school_data, how="left", on="school_name")
school_data_complete
```
## District Summary
* Calculate the total number of schools
* Calculate the total number of students
* Calculate the total budget
* Calculate the average math score
* Calculate the average reading score
* Calculate the percentage of students with a passing math score (70 or greater)
* Calculate the percentage of students with a passing reading score (70 or greater)
* Calculate the percentage of students who passed math **and** reading (% Overall Passing)
* Create a dataframe to hold the above results
* Optional: give the displayed data cleaner formatting
```
# Total number of schools
total_schools = school_data_complete["school_name"].nunique()
# Total number of students
total_students = school_data_complete["Student ID"].count()
# Total budget
total_budget = sum(school_data_complete["budget"].unique())
# Average math score
average_math = school_data_complete["math_score"].mean()
# Average reading score
average_reading = school_data_complete["reading_score"].mean()
# Percentage of students with passing math score 70 or higher
passing_math = (school_data_complete["math_score"] >= 70).mean() * 100
# Percentage of students with passing reading score 70 or higher
passing_reading = (school_data_complete["reading_score"] >= 70).mean() * 100
# Percentage of students who passed math and reading(% Overall Passing)
passing_readandmath = ((school_data_complete["reading_score"] >= 70) & (school_data_complete["math_score"] >= 70)).mean() * 100
# Creating district_summary_df
district_summary_df = pd.DataFrame({"Total Schools":[total_schools],
"Total Students":[total_students],
"Total Budget":[total_budget],
"Average Math Score":[average_math],
"Average Reading Score":[average_reading],
"% Passing Math":[passing_math],
"% Passing Reading":[passing_reading],
"% Overall Passing":[passing_readandmath]
})
district_summary_df
# Cleaning up formatting with .map
district_summary_df["Total Students"] = district_summary_df["Total Students"].map("{:,}".format)
district_summary_df["Total Budget"] = district_summary_df["Total Budget"].map("${:,.2f}".format)
district_summary_df["Average Math Score"] = district_summary_df["Average Math Score"].map("{:.6f}".format)
district_summary_df["Average Reading Score"] = district_summary_df["Average Reading Score"].map("{:.5f}".format)
district_summary_df["% Passing Math"] = district_summary_df["% Passing Math"].map("{:.6f}".format)
district_summary_df["% Passing Reading"] = district_summary_df["% Passing Reading"].map("{:.6f}".format)
district_summary_df["% Overall Passing"] = district_summary_df["% Overall Passing"].map("{:.6f}".format)
district_summary_df
```
## School Summary
* Create an overview table that summarizes key metrics about each school, including:
* School Name
* School Type
* Total Students
* Total School Budget
* Per Student Budget
* Average Math Score
* Average Reading Score
* % Passing Math
* % Passing Reading
* % Overall Passing (The percentage of students that passed math **and** reading.)
* Create a dataframe to hold the above results
```
# Groupby school_name
by_school = school_data_complete.groupby(["school_name"])
# School Type
school_type = by_school["type"].first()
# Total Students
tot_students = by_school.size()
# Total School Budget
tot_school_budget = by_school["budget"].first()
# Per Student Budget
tot_per_stu = tot_school_budget/tot_students
# Average Math Score
avg_math_score = by_school["math_score"].mean()
# Average Reading Score
avg_reading_score = by_school["reading_score"].mean()
# 1 pull all students greater than or equal to 70 for math_score
# Count the number of students, and group by school name, & divide by total population of students
passing_math2 = school_data_complete[(school_data_complete["math_score"]>=70)]
percent_math2 = (passing_math2.groupby(["school_name"]).count()['student_name'] / tot_students)*100
# 1 pull all students greater than or equal to 70 for reading_score
# Count the number of students, and group by school name, & divide by total population of students
passing_reading2 = school_data_complete[(school_data_complete["reading_score"]>=70)]
percent_reading2 = (passing_reading2.groupby(["school_name"]).count()['student_name']/ tot_students)*100
# 1 pull all students greater than or equal to 70 for both math and reading
# Count the number of students, and group by school name, & divide by total population of students
passing_mathread = school_data_complete[(school_data_complete["math_score"]>=70)&(school_data_complete["reading_score"]>=70)]
percent_mathread = (passing_mathread.groupby(["school_name"]).count()['student_name']/ tot_students)*100
# Created School Summary DataFrame
school_summary_df = pd.DataFrame({
"School Type":school_type,
"Total Students":tot_students,
"Total School Budget":tot_school_budget,
"Per Student Budget":tot_per_stu,
"Average Math Score":avg_math_score,
"Average Reading Score":avg_reading_score,
"% Passing Math":percent_math2,
"% Passing Reading":percent_reading2,
"% Overall Passing": percent_mathread
})
# Cleaning up formatting with .map
school_summary_df["Total School Budget"] = school_summary_df["Total School Budget"].map("${:,.2f}".format)
school_summary_df["Per Student Budget"] = school_summary_df["Per Student Budget"].map("${:.2f}".format)
school_summary_df
```
## Top Performing Schools (By % Overall Passing)
* Sort and display the top five performing schools by % overall passing.
```
# Using sort_values, display top 5 performing schools b 5 overall passing
top_sort_overall = school_summary_df.sort_values(["% Overall Passing"],ascending = False)
top_sort_overall.head()
```
## Bottom Performing Schools (By % Overall Passing)
* Sort and display the five worst-performing schools by % overall passing.
```
# Using sort_values, display 5 worst performing schools by % overall passing
worst_sort_overall = school_summary_df.sort_values(["% Overall Passing"],ascending = True)
worst_sort_overall.head()
```
## Math Scores by Grade
* Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
* Create a pandas series for each grade. Hint: use a conditional statement.
* Group each series by school
* Combine the series into a dataframe
* Optional: give the displayed data cleaner formatting
```
# Create a panda series for ninth grade using a conditional statement
# Groupby school_name and find the average math_score
ninth = school_data_complete[(school_data_complete["grade"]=='9th')]
ninth_grade = ninth.groupby(["school_name"]).mean()["math_score"]
# Create a panda series for tenth grade using a conditional statement
# Groupby school_name and find the average math_score
tenth = school_data_complete[(school_data_complete["grade"]=='10th')]
tenth_grade = tenth.groupby(["school_name"]).mean()["math_score"]
# Create a panda series for eleventh grade using a conditional statement
# Groupby school_name and find the average math_score
eleventh = school_data_complete[(school_data_complete["grade"]=='11th')]
eleventh_grade = eleventh.groupby(["school_name"]).mean()["math_score"]
# Create a panda series for twelfth grade using a conditional statement
# Groupby school_name and find the average math_score
twelfth = school_data_complete[(school_data_complete["grade"]=='12th')]
twelfth_grade = twelfth.groupby(["school_name"]).mean()["math_score"]
# Create a new dataframe to hold the series data
math_scores_bygrade_df = pd.DataFrame({
"9th":ninth_grade,
"10th":tenth_grade,
"11th":eleventh_grade,
"12th":twelfth_grade
})
math_scores_bygrade_df
```
## Reading Score by Grade
* Perform the same operations as above for reading scores
```
# Create a panda series for ninth grade using a conditional statement
# Groupby school_name and find the average reading_score
ninth_r = school_data_complete[(school_data_complete["grade"]=='9th')]
ninth_grade_r = ninth_r.groupby(["school_name"]).mean()["reading_score"]
# Create a panda series for tenth grade using a conditional statement
# Groupby school_name and find the average reading_score
tenth_r = school_data_complete[(school_data_complete["grade"]=='10th')]
tenth_grade_r = tenth_r.groupby(["school_name"]).mean()["reading_score"]
# Create a panda series for eleventh grade using a conditional statement
# Groupby school_name and find the average reading_score
eleventh_r = school_data_complete[(school_data_complete["grade"]=='11th')]
eleventh_grade_r = eleventh_r.groupby(["school_name"]).mean()["reading_score"]
# Create a panda series for twelfth grade using a conditional statement
# Groupby school_name and find the average reading_score
twelfth_r = school_data_complete[(school_data_complete["grade"]=='12th')]
twelfth_grade_r = twelfth_r.groupby(["school_name"]).mean()["reading_score"]
reading_scores_bygrade_df = pd.DataFrame({
"9th":ninth_grade_r,
"10th":tenth_grade_r,
"11th":eleventh_grade_r,
"12th":twelfth_grade_r
})
reading_scores_bygrade_df
```
## Scores by School Spending
* Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
* Average Math Score
* Average Reading Score
* % Passing Math
* % Passing Reading
* Overall Passing Rate (Average of the above two)
```
# Make bins
# Make group_names for labels
spending_ranges_bins = [0,585,630,645,675]
group_names = ["<$584","$585-629","$630-644", "$645-675"]
# Categorize the data frame using bins
scores_by_school = school_summary_df
scores_by_school["Spending Ranges (Per Student)"] = pd.cut(tot_per_stu, spending_ranges_bins, labels = group_names)
# Average Math Score
average_math = scores_by_school.groupby(["Spending Ranges (Per Student)"]).mean()["Average Math Score"]
# Average Reading Score
average_reading = scores_by_school.groupby(["Spending Ranges (Per Student)"]).mean()["Average Reading Score"]
# % Passing Math
percent_math = scores_by_school.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Math"]
# % Passing Reading
percent_reading = scores_by_school.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Reading"]
# % Overall Passing
percent_overall = scores_by_school.groupby(["Spending Ranges (Per Student)"]).mean()["% Overall Passing"]
# Create DataFrame to store this new information
scores_by_spending_df = pd.DataFrame({
"Average Math Score":average_math,
"Average Reading Score":average_reading,
"% Passing Math":percent_math,
"% Passing Reading":percent_reading,
"% Overall Passing":percent_overall
})
# Cleaning up formatting with .map
scores_by_spending_df["Average Math Score"] = scores_by_spending_df["Average Math Score"].map("{:.2f}".format)
scores_by_spending_df["Average Reading Score"] = scores_by_spending_df["Average Reading Score"].map("{:.2f}".format)
scores_by_spending_df["% Passing Math"] = scores_by_spending_df["% Passing Math"].map("{:.2f}".format)
scores_by_spending_df["% Passing Reading"] = scores_by_spending_df["% Passing Reading"].map("{:.2f}".format)
scores_by_spending_df["% Overall Passing"] = scores_by_spending_df["% Overall Passing"].map("{:.2f}".format)
scores_by_spending_df
```
## Scores by School Size
* Perform the same operations as above, based on school size.
```
# Make bins
# Make group_names for labels
size_range_bins = [0,1000,2000,5000]
s_group_names = ["Small (<1000)","Medium (1000-2000)","Large (2000-5000)"]
# Categorize the data frame using bins
scores_by_size = school_summary_df
scores_by_size["Size Ranges"] = pd.cut(tot_students, size_range_bins, labels = s_group_names)
# Average Math Score
size_average_math = scores_by_size.groupby(["Size Ranges"]).mean()["Average Math Score"]
# Average Reading Score
size_average_reading = scores_by_size.groupby(["Size Ranges"]).mean()["Average Reading Score"]
# % Passing Math
size_percent_math = scores_by_size.groupby(["Size Ranges"]).mean()["% Passing Math"]
# % Passing Reading
size_percent_reading = scores_by_size.groupby(["Size Ranges"]).mean()["% Passing Reading"]
# % Overall Passing
size_percent_overall = scores_by_size.groupby(["Size Ranges"]).mean()["% Overall Passing"]
# Create DataFrame to store this new information
scores_by_size_df = pd.DataFrame({
"Average Math Score":size_average_math,
"Average Reading Score":size_average_reading,
"% Passing Math":size_percent_math,
"% Passing Reading":size_percent_reading,
"% Overall Passing":size_percent_overall
})
# Cleaning up formatting with .map
scores_by_size_df["Average Math Score"] = scores_by_size_df["Average Math Score"].map("{:.2f}".format)
scores_by_size_df["Average Reading Score"] = scores_by_size_df["Average Reading Score"].map("{:.2f}".format)
scores_by_size_df["% Passing Math"] = scores_by_size_df["% Passing Math"].map("{:.2f}".format)
scores_by_size_df["% Passing Reading"] = scores_by_size_df["% Passing Reading"].map("{:.2f}".format)
scores_by_size_df["% Overall Passing"] = scores_by_size_df["% Overall Passing"].map("{:.2f}".format)
scores_by_size_df
```
## Scores by School Type
* Perform the same operations as above, based on school type
```
# Groupby school_type
by_type = school_summary_df.groupby(["School Type"])
# Average Math Score
type_avg_math_score = by_type["Average Math Score"].mean()
# Average Reading Score
type_avg_reading_score = by_type["Average Reading Score"].mean()
# % Passing Math
type_percent_math_score = by_type["% Passing Math"].mean()
# % Passing Reading
type_percent_reading_score = by_type["% Passing Reading"].mean()
# % Overall Passing
type_percent_overall_score = by_type["% Overall Passing"].mean()
# Create new dataframe to store school type data
scores_by_type_df = pd.DataFrame({
"Average Math Score":type_avg_math_score,
"Average Reading Score":type_avg_reading_score,
"% Passing Math":type_percent_math_score,
"% Passing Reading":type_percent_reading_score,
"% Overall Passing":type_percent_overall_score
})
# Cleaning up formatting with .map
scores_by_type_df["Average Math Score"] = scores_by_type_df["Average Math Score"].map("{:.2f}".format)
scores_by_type_df["Average Reading Score"] = scores_by_type_df["Average Reading Score"].map("{:.2f}".format)
scores_by_type_df["% Passing Math"] = scores_by_type_df["% Passing Math"].map("{:.2f}".format)
scores_by_type_df["% Passing Reading"] = scores_by_type_df["% Passing Reading"].map("{:.2f}".format)
scores_by_type_df["% Overall Passing"] = scores_by_type_df["% Overall Passing"].map("{:.2f}".format)
scores_by_type_df
```
PyCitySchools Analysis:
Comparing the schools in this study by size and by spending suggests that bigger enrolments and bigger budgets do not necessarily translate into better student performance. It was interesting to compare the five top-performing schools with the five bottom-performing schools: the top five were smaller, with fewer students and smaller overall and per-student budgets, and yet they outperformed the schools with more students and larger overall budgets. The smaller schools had roughly 93-94% of students passing math, 95-97% passing reading, and an overall passing rate of about 90%. The five bottom-performing schools were all larger schools with more students and larger budgets, and despite that, only about 65% of their students passed math, about 81% passed reading, and the overall passing rate was only about 53%.
It was also interesting to find that the five top-performing schools were all charter schools. More research would be needed to understand why charter schools outperformed district schools, and whether the reasons are separate from school size and budget. It would also be interesting to compare class sizes or the teaching approaches and programs available to students, which may have contributed to the higher success rate in the charter/smaller schools compared with the district/larger schools.
In conclusion, despite their smaller budgets, smaller schools tend to offer a better teacher-student ratio in the classroom than their larger counterparts, where teachers may be less able to oversee each student's success.
|
github_jupyter
|
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset.
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
school_data_complete
# Total number of schools
total_schools = school_data_complete["school_name"].nunique()
# Total number of students
total_students = school_data_complete["Student ID"].count()
# Total budget
total_budget = sum(school_data_complete["budget"].unique())
# Average math score
average_math = school_data_complete["math_score"].mean()
# Average reading score
average_reading = school_data_complete["reading_score"].mean()
# Percentage of students with passing math score 70 or higher
passing_math = (school_data_complete["math_score"] >= 70).mean() * 100
# Percentage of students with passing reading score 70 or higher
passing_reading = (school_data_complete["reading_score"] >= 70).mean() * 100
# Percentage of students who passed math and reading(% Overall Passing)
passing_readandmath = ((school_data_complete["reading_score"] >= 70) & (school_data_complete["math_score"] >= 70)).mean() * 100
# Creating district_summary_df
district_summary_df = pd.DataFrame({"Total Schools":[total_schools],
"Total Students":[total_students],
"Total Budget":[total_budget],
"Average Math Score":[average_math],
"Average Reading Score":[average_reading],
"% Passing Math":[passing_math],
"% Passing Reading":[passing_reading],
"% Overall Passing":[passing_readandmath]
})
district_summary_df
# Cleaning up formatting with .map
district_summary_df["Total Students"] = district_summary_df["Total Students"].map("{:,}".format)
district_summary_df["Total Budget"] = district_summary_df["Total Budget"].map("${:,.2f}".format)
district_summary_df["Average Math Score"] = district_summary_df["Average Math Score"].map("{:.6f}".format)
district_summary_df["Average Reading Score"] = district_summary_df["Average Reading Score"].map("{:.5f}".format)
district_summary_df["% Passing Math"] = district_summary_df["% Passing Math"].map("{:.6f}".format)
district_summary_df["% Passing Reading"] = district_summary_df["% Passing Reading"].map("{:.6f}".format)
district_summary_df["% Overall Passing"] = district_summary_df["% Overall Passing"].map("{:.6f}".format)
district_summary_df
# Groupby school_name
by_school = school_data_complete.groupby(["school_name"])
# School Type
school_type = by_school["type"].first()
# Total Students
tot_students = by_school.size()
# Total School Budget
tot_school_budget = by_school["budget"].first()
# Per Student Budget
tot_per_stu = tot_school_budget/tot_students
# Average Math Score
avg_math_score = by_school["math_score"].mean()
# Average Reading Score
avg_reading_score = by_school["reading_score"].mean()
# 1 pull all students greater than or equal to 70 for math_score
# Count the number of students, and group by school name, & divide by total population of students
passing_math2 = school_data_complete[(school_data_complete["math_score"]>=70)]
percent_math2 = (passing_math2.groupby(["school_name"]).count()['student_name'] / tot_students)*100
# 1 pull all students greater than or equal to 70 for reading_score
# Count the number of students, and group by school name, & divide by total population of students
passing_reading2 = school_data_complete[(school_data_complete["reading_score"]>=70)]
percent_reading2 = (passing_reading2.groupby(["school_name"]).count()['student_name']/ tot_students)*100
# 1 pull all students greater than or equal to 70 for both math and reading
# Count the number of students, and group by school name, & divide by total population of students
passing_mathread = school_data_complete[(school_data_complete["math_score"]>=70)&(school_data_complete["reading_score"]>=70)]
percent_mathread = (passing_mathread.groupby(["school_name"]).count()['student_name']/ tot_students)*100
# Created School Summary DataFrame
school_summary_df = pd.DataFrame({
"School Type":school_type,
"Total Students":tot_students,
"Total School Budget":tot_school_budget,
"Per Student Budget":tot_per_stu,
"Average Math Score":avg_math_score,
"Average Reading Score":avg_reading_score,
"% Passing Math":percent_math2,
"% Passing Reading":percent_reading2,
"% Overall Passing": percent_mathread
})
# Cleaning up formatting with .map
school_summary_df["Total School Budget"] = school_summary_df["Total School Budget"].map("${:,.2f}".format)
school_summary_df["Per Student Budget"] = school_summary_df["Per Student Budget"].map("${:.2f}".format)
school_summary_df
# Using sort_values, display top 5 performing schools b 5 overall passing
top_sort_overall = school_summary_df.sort_values(["% Overall Passing"],ascending = False)
top_sort_overall.head()
# Using sort_values, display 5 worst performing schools by % overall passing
worst_sort_overall = school_summary_df.sort_values(["% Overall Passing"],ascending = True)
worst_sort_overall.head()
# Create a panda series for ninth grade using a conditional statement
# Groupby school_name and find the average math_score
ninth = school_data_complete[(school_data_complete["grade"]=='9th')]
ninth_grade = ninth.groupby(["school_name"]).mean()["math_score"]
# Create a panda series for tenth grade using a conditional statement
# Groupby school_name and find the average math_score
tenth = school_data_complete[(school_data_complete["grade"]=='10th')]
tenth_grade = tenth.groupby(["school_name"]).mean()["math_score"]
# Create a panda series for eleventh grade using a conditional statement
# Groupby school_name and find the average math_score
eleventh = school_data_complete[(school_data_complete["grade"]=='11th')]
eleventh_grade = eleventh.groupby(["school_name"]).mean()["math_score"]
# Create a panda series for twelfth grade using a conditional statement
# Groupby school_name and find the average math_score
twelfth = school_data_complete[(school_data_complete["grade"]=='12th')]
twelfth_grade = twelfth.groupby(["school_name"]).mean()["math_score"]
# Create a new dataframe to hold the series data
math_scores_bygrade_df = pd.DataFrame({
"9th":ninth_grade,
"10th":tenth_grade,
"11th":eleventh_grade,
"12th":twelfth_grade
})
math_scores_bygrade_df
# Create a panda series for ninth grade using a conditional statement
# Groupby school_name and find the average reading_score
ninth_r = school_data_complete[(school_data_complete["grade"]=='9th')]
ninth_grade_r = ninth_r.groupby(["school_name"]).mean()["reading_score"]
# Create a panda series for tenth grade using a conditional statement
# Groupby school_name and find the average reading_score
tenth_r = school_data_complete[(school_data_complete["grade"]=='10th')]
tenth_grade_r = tenth_r.groupby(["school_name"]).mean()["reading_score"]
# Create a panda series for eleventh grade using a conditional statement
# Groupby school_name and find the average reading_score
eleventh_r = school_data_complete[(school_data_complete["grade"]=='11th')]
eleventh_grade_r = eleventh_r.groupby(["school_name"]).mean()["reading_score"]
# Create a panda series for twelfth grade using a conditional statement
# Groupby school_name and find the average reading_score
twelfth_r = school_data_complete[(school_data_complete["grade"]=='12th')]
twelfth_grade_r = twelfth_r.groupby(["school_name"]).mean()["reading_score"]
reading_scores_bygrade_df = pd.DataFrame({
"9th":ninth_grade_r,
"10th":tenth_grade_r,
"11th":eleventh_grade_r,
"12th":twelfth_grade_r
})
reading_scores_bygrade_df
# Make bins
# Make group_names for labels
spending_ranges_bins = [0,585,630,645,675]
group_names = ["<$584","$585-629","$630-644", "$645-675"]
# Categorize the data frame using bins
scores_by_school = school_summary_df
scores_by_school["Spending Ranges (Per Student)"] = pd.cut(tot_per_stu, spending_ranges_bins, labels = group_names)
# Average Math Score
average_math = scores_by_school.groupby(["Spending Ranges (Per Student)"]).mean()["Average Math Score"]
# Average Reading Score
average_reading = scores_by_school.groupby(["Spending Ranges (Per Student)"]).mean()["Average Reading Score"]
# % Passing Math
percent_math = scores_by_school.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Math"]
# % Passing Reading
percent_reading = scores_by_school.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Reading"]
# % Overall Passing
percent_overall = scores_by_school.groupby(["Spending Ranges (Per Student)"]).mean()["% Overall Passing"]
# Create DataFrame to store this new information
scores_by_spending_df = pd.DataFrame({
"Average Math Score":average_math,
"Average Reading Score":average_reading,
"% Passing Math":percent_math,
"% Passing Reading":percent_reading,
"% Overall Passing":percent_overall
})
# Cleaning up formatting with .map
scores_by_spending_df["Average Math Score"] = scores_by_spending_df["Average Math Score"].map("{:.2f}".format)
scores_by_spending_df["Average Reading Score"] = scores_by_spending_df["Average Reading Score"].map("{:.2f}".format)
scores_by_spending_df["% Passing Math"] = scores_by_spending_df["% Passing Math"].map("{:.2f}".format)
scores_by_spending_df["% Passing Reading"] = scores_by_spending_df["% Passing Reading"].map("{:.2f}".format)
scores_by_spending_df["% Overall Passing"] = scores_by_spending_df["% Overall Passing"].map("{:.2f}".format)
scores_by_spending_df
# Make bins
# Make group_names for labels
size_range_bins = [0,1000,2000,5000]
s_group_names = ["Small (<1000)","Medium (1000-2000)","Large (2000-5000)"]
# Categorize the data frame using bins
scores_by_size = school_summary_df
scores_by_size["Size Ranges"] = pd.cut(tot_students, size_range_bins, labels = s_group_names)
# Average Math Score
size_average_math = scores_by_size.groupby(["Size Ranges"]).mean()["Average Math Score"]
# Average Reading Score
size_average_reading = scores_by_size.groupby(["Size Ranges"]).mean()["Average Reading Score"]
# % Passing Math
size_percent_math = scores_by_size.groupby(["Size Ranges"]).mean()["% Passing Math"]
# % Passing Reading
size_percent_reading = scores_by_size.groupby(["Size Ranges"]).mean()["% Passing Reading"]
# % Overall Passing
size_percent_overall = scores_by_size.groupby(["Size Ranges"]).mean()["% Overall Passing"]
# Create DataFrame to store this new information
scores_by_size_df = pd.DataFrame({
"Average Math Score":size_average_math,
"Average Reading Score":size_average_reading,
"% Passing Math":size_percent_math,
"% Passing Reading":size_percent_reading,
"% Overall Passing":size_percent_overall
})
# Cleaning up formatting with .map
scores_by_size_df["Average Math Score"] = scores_by_size_df["Average Math Score"].map("{:.2f}".format)
scores_by_size_df["Average Reading Score"] = scores_by_size_df["Average Reading Score"].map("{:.2f}".format)
scores_by_size_df["% Passing Math"] = scores_by_size_df["% Passing Math"].map("{:.2f}".format)
scores_by_size_df["% Passing Reading"] = scores_by_size_df["% Passing Reading"].map("{:.2f}".format)
scores_by_size_df["% Overall Passing"] = scores_by_size_df["% Overall Passing"].map("{:.2f}".format)
scores_by_size_df
# Groupby school_type
by_type = school_summary_df.groupby(["School Type"])
# Average Math Score
type_avg_math_score = by_type["Average Math Score"].mean()
# Average Reading Score
type_avg_reading_score = by_type["Average Reading Score"].mean()
# % Passing Math
type_percent_math_score = by_type["% Passing Math"].mean()
# % Passing Reading
type_percent_reading_score = by_type["% Passing Reading"].mean()
# % Overall Passing
type_percent_overall_score = by_type["% Overall Passing"].mean()
# Create new dataframe to store school type data
scores_by_type_df = pd.DataFrame({
"Average Math Score":type_avg_math_score,
"Average Reading Score":type_avg_reading_score,
"% Passing Math":type_percent_math_score,
"% Passing Reading":type_percent_reading_score,
"% Overall Passing":type_percent_overall_score
})
# Cleaning up formatting with .map
scores_by_type_df["Average Math Score"] = scores_by_type_df["Average Math Score"].map("{:.2f}".format)
scores_by_type_df["Average Reading Score"] = scores_by_type_df["Average Reading Score"].map("{:.2f}".format)
scores_by_type_df["% Passing Math"] = scores_by_type_df["% Passing Math"].map("{:.2f}".format)
scores_by_type_df["% Passing Reading"] = scores_by_type_df["% Passing Reading"].map("{:.2f}".format)
scores_by_type_df["% Overall Passing"] = scores_by_type_df["% Overall Passing"].map("{:.2f}".format)
scores_by_type_df
| 0.539711 | 0.823328 |
# Node structure
We look at whether certain node types are neatly stacked, meaning that
if a node `N` of that type is interrupted by another node `M` of the same type,
`M` is terminated strictly before `N` is terminated.
If this is the case for a node type, we say that the nodes of that type *stack*.
If it is not the case for a certain node `N`, we say that `N` does not stack.
We inspect the BHSA and see which of its node types do not stack.
```
import os
import collections
from tf.app import use
A = use("bhsa:clone", checkout="clone", hoist=globals())
def getAcross(nodeType):
starts = {}
ends = {}
gapped = []
for n in F.otype.s(nodeType):
slots = E.oslots.s(n)
(first, last) = (slots[0], slots[-1])
starts.setdefault(first, set())
ends.setdefault(last, set())
starts[first].add(n)
ends[last].add(n)
if last - first + 1 > len(slots):
gapped.append(n)
across = set()
for n in gapped:
slots = E.oslots.s(n)
starters = set()
enders = set()
for s in slots:
if s in starts:
for m in starts[s]:
if m != n:
starters.add(m)
if s in ends:
for m in ends[s]:
if m != n:
enders.add(m)
if not starters <= enders:
across.add(n)
print(
f"{nodeType:<20}: {len(gapped):>5} gapped nodes of which {len(across):>5} do not stack"
)
for nodeType in F.otype.all:
if nodeType == "word":
continue
getAcross(nodeType)
```
# Outcome
In the BHSA, all node types *stack*.
## False conjecture (i)
We can characterize all outer and inner boundaries of nodes by specifying for each slot the boolean properties start-of-node and end-of-node, saying whether that slot is the start of a node and the end of a node respectively.
Why is this wrong: if we do not tell which node starts/ends, we have too little information.
Example:
`N1 M1 N2 M2`
These are two nodes `N` and `M`, each consisting of two parts, interleaved.
Consider the first word of `N2`. No node starts there, no node ends there, so start-of-node and end-of-node are both `False`, so we have no way of seeing the alternation between `N` and `M` at this point.
Intuitively, we see that this example does not stack and that might cause the trouble.
## False conjecture (ii)
If a node type *stacks*, the previous conjecture holds true.
But there is a counter example:
`N1 M N2 P1 N3 P2 N4`.
Consider the first word of `N3`. No node starts or ends here, and the example is stacked.
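To see the information loss concretely, here is a small illustrative sketch (the slot ranges are invented for these toy examples and are unrelated to the BHSA data): it records only the outer start-of-node and end-of-node booleans per slot and shows that the critical slots in both examples carry no boundary information at all.
```
# illustrative only: made-up slot ranges for the toy examples above;
# each part of a node spans two slots
examples = [
    ("conjecture (i), first word of N2",
     {"N": [(1, 2), (5, 6)], "M": [(3, 4), (7, 8)]}, 5),
    ("conjecture (ii), first word of N3",
     {"N": [(1, 2), (5, 6), (9, 10), (13, 14)], "M": [(3, 4)], "P": [(7, 8), (11, 12)]}, 9),
]

for (label, nodes, slot) in examples:
    startOf = {parts[0][0] for parts in nodes.values()}  # outer starts
    endOf = {parts[-1][1] for parts in nodes.values()}   # outer ends
    print(f"{label}: start-of-node={slot in startOf}, end-of-node={slot in endOf}")
```
Both critical slots come out as `False`/`False`, even though, as argued above, the second example is stacked.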
# Method
In order to characterize inner and outer node boundaries, specify for each slot and each node that has a boundary at that slot:
* the node number of the node
* the kind of boundary: start/end/resume/suspend
```
def writeBoundaries(outFile):
boundaries = collections.defaultdict(list)
maxSlot = F.otype.maxSlot
nodeTypes = {"clause", "phrase"}
for nodeType in nodeTypes:
for n in F.otype.s(nodeType):
slots = E.oslots.s(n)
(first, last) = (slots[0], slots[-1])
boundaries[first].append(("B", n))
boundaries[last].append(("E", n))
for (i, s) in enumerate(slots):
if i > 0 and slots[i - 1] != s - 1:
boundaries[s].append(("b", n))
if i < len(slots) - 1 and slots[i + 1] != s + 1:
boundaries[s].append(("e", n))
with open(outFile, "w") as fh:
for s in range(1, maxSlot + 1):
bounds = "-".join("{}{}".format(*x) for x in boundaries[s])
fh.write(f"{s},{bounds}\n")
outFile = os.path.expanduser("~/Downloads/bhsa-boundaries.csv")
writeBoundaries(outFile)
```
|
github_jupyter
|
import os
import collections
from tf.app import use
A = use("bhsa:clone", checkout="clone", hoist=globals())
def getAcross(nodeType):
starts = {}
ends = {}
gapped = []
for n in F.otype.s(nodeType):
slots = E.oslots.s(n)
(first, last) = (slots[0], slots[-1])
starts.setdefault(first, set())
ends.setdefault(last, set())
starts[first].add(n)
ends[last].add(n)
if last - first + 1 > len(slots):
gapped.append(n)
across = set()
for n in gapped:
slots = E.oslots.s(n)
starters = set()
enders = set()
for s in slots:
if s in starts:
for m in starts[s]:
if m != n:
starters.add(m)
if s in ends:
for m in ends[s]:
if m != n:
enders.add(m)
if not starters <= enders:
across.add(n)
print(
f"{nodeType:<20}: {len(gapped):>5} gapped nodes of which {len(across):>5} do not stack"
)
for nodeType in F.otype.all:
if nodeType == "word":
continue
getAcross(nodeType)
def writeBoundaries(outFile):
boundaries = collections.defaultdict(list)
maxSlot = F.otype.maxSlot
nodeTypes = {"clause", "phrase"}
for nodeType in nodeTypes:
for n in F.otype.s(nodeType):
slots = E.oslots.s(n)
(first, last) = (slots[0], slots[-1])
boundaries[first].append(("B", n))
boundaries[last].append(("E", n))
for (i, s) in enumerate(slots):
if i > 0 and slots[i - 1] != s - 1:
boundaries[s].append(("b", n))
if i < len(slots) - 1 and slots[i + 1] != s + 1:
boundaries[s].append(("e", n))
with open(outFile, "w") as fh:
for s in range(1, maxSlot + 1):
bounds = "-".join("{}{}".format(*x) for x in boundaries[s])
fh.write(f"{s},{bounds}\n")
outFile = os.path.expanduser("~/Downloads/bhsa-boundaries.csv")
writeBoundaries(outFile)
| 0.094756 | 0.878471 |
# Data description (from Kaggle):
In this competition you will develop algorithms to classify genetic mutations based on clinical evidence (text).
There are nine different classes a genetic mutation can be classified on.
This is not a trivial task since interpreting clinical evidence is very challenging even for human specialists. Therefore, modeling the clinical evidence (text) will be critical for the success of your approach.
Both, training and test, data sets are provided via two different files. One (training/test_variants) provides the information about the genetic mutations, whereas the other (training/test_text) provides the clinical evidence (text) that our human experts used to classify the genetic mutations. Both are linked via the ID field.
Therefore the genetic mutation (row) with ID=15 in the file training_variants, was classified using the clinical evidence (text) from the row with ID=15 in the file training_text
Finally, to make it more exciting!! Some of the test data is machine-generated to prevent hand labeling. You will submit all the results of your classification algorithm, and we will ignore the machine-generated samples.
File descriptions
- training_variants - a comma separated file containing the description of the genetic mutations used for training. Fields are ID (the id of the row used to link the mutation to the clinical evidence), Gene (the gene where this genetic mutation is located), Variation (the amino acid change for this mutation), Class (1-9, the class this genetic mutation has been classified on)
- training_text - a double pipe (||) delimited file that contains the clinical evidence (text) used to classify genetic mutations (see the pandas sketch after this list for one way to read it). Fields are ID (the id of the row used to link the clinical evidence to the genetic mutation), Text (the clinical evidence used to classify the genetic mutation)
- test_variants - a comma separated file containing the description of the genetic mutations used for testing. Fields are ID (the id of the row used to link the mutation to the clinical evidence), Gene (the gene where this genetic mutation is located), Variation (the amino acid change for this mutation)
- test_text - a double pipe (||) delimited file that contains the clinical evidence (text) used to classify genetic mutations. Fields are ID (the id of the row used to link the clinical evidence to the genetic mutation), Text (the clinical evidence used to classify the genetic mutation)
- submissionSample - a sample submission file in the correct format
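Since the text files are ||-delimited, they can also be read directly with pandas instead of being parsed by hand; a minimal sketch (assuming, as in the original Kaggle release, that the raw file begins with a one-line header):
```
import pandas as pd

# alternative to the manual parsing below: read the '||'-delimited file directly;
# skiprows=1 assumes the raw file begins with a one-line header ("ID,Text")
text_df = pd.read_csv("./data_files/training_text", sep=r"\|\|", engine="python",
                      skiprows=1, header=None, names=["ID", "Text"])
variants_df = pd.read_csv("./data_files/training_variants")
merged = variants_df.merge(text_df, on="ID")
print(merged.head())
```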
```
# Neste caso, o dataset de test não fornece a classe de mutação, pois é destinado somente à previsão para
# submissão à competição do kaggle.
# In this case, the test dataset does not provide the class of the mutation. The test dataset is only for
# submition to kaggle.
```
## Loading the training_text file
```
import pandas as pd
## Função para estruturar o texto bruto em um dicionário, salvo como resultado intermediário em um artuivo json.
## Function to give the raw text a structure and save the intermediary result in a json file.
def structure_text(path, json_name):
f = open(path, 'r')
text = f.read()
f.close()
lines = text.split(sep='\n')
dummy = {'ID':[], 'Text':[]}
for line in lines:
splitted_line = line.split(sep='||')
if len(splitted_line) > 1:
dummy['ID'].append(splitted_line[0])
dummy['Text'].append(splitted_line[1])
df = pd.DataFrame.from_dict(dummy)
df.set_index('ID', inplace=True)
df.to_json(path_or_buf=json_name, orient='split')
path_1 = './data_files/training_text'
#path_2 = './data_files/test_text'
structure_text(path_1, './data_files/training_text.json')
#structure_text(path_2, './data_files/test_text.json')
df1 = pd.read_json('./data_files/training_text.json', orient='split')
df1.head()
#df2 = pd.read_json('./data_files/test_text.json', orient='split')
#df2.head()
df1
```
## Loading the training_variants file
```
df2 = pd.read_csv('./data_files/training_variants')
df2
df2.set_index('ID', inplace=True)
df2
# Salvando resultado intermediário in json
# Saving intermediary result in json file.
df2.to_json(path_or_buf='./data_files/training_variants.json', orient='split')
```
## Merging the two datasets on the index
```
df3 = df1.merge(df2, left_index=True, right_index=True)
df3
```
|
github_jupyter
|
# Neste caso, o dataset de test não fornece a classe de mutação, pois é destinado somente à previsão para
# submissão à competição do kaggle.
# In this case, the test dataset does not provide the class of the mutation. The test dataset is only for
# submition to kaggle.
import pandas as pd
## Função para estruturar o texto bruto em um dicionário, salvo como resultado intermediário em um artuivo json.
## Function to give the raw text a structure and save the intermediary result in a json file.
def structure_text(path, json_name):
f = open(path, 'r')
text = f.read()
f.close()
lines = text.split(sep='\n')
dummy = {'ID':[], 'Text':[]}
for line in lines:
splitted_line = line.split(sep='||')
if len(splitted_line) > 1:
dummy['ID'].append(splitted_line[0])
dummy['Text'].append(splitted_line[1])
df = pd.DataFrame.from_dict(dummy)
df.set_index('ID', inplace=True)
df.to_json(path_or_buf=json_name, orient='split')
path_1 = './data_files/training_text'
#path_2 = './data_files/test_text'
structure_text(path_1, './data_files/training_text.json')
#structure_text(path_2, './data_files/test_text.json')
df1 = pd.read_json('./data_files/training_text.json', orient='split')
df1.head()
#df2 = pd.read_json('./data_files/test_text.json', orient='split')
#df2.head()
df1
df2 = pd.read_csv('./data_files/training_variants')
df2
df2.set_index('ID', inplace=True)
df2
# Salvando resultado intermediário in json
# Saving intermediary result in json file.
df2.to_json(path_or_buf='./data_files/training_variants.json', orient='split')
df3 = df1.merge(df2, left_index=True, right_index=True)
df3
| 0.207375 | 0.962072 |
```
import math
import torch
import numpy as np
import matplotlib.pyplot as plt
device = 'cuda' if torch.cuda.is_available() else 'cpu'
```
# Exploring optimisation of analytic functions
```
def rastrigin(X, A=1.0):
return A*2 + ( (X[0]**2 - A*torch.cos(2*math.pi*X[0])) + (X[1]**2 - A*torch.cos(2*math.pi*X[1])) )
xmin, xmax, xstep = -5, 5, .2
ymin, ymax, ystep = -5, 5, .2
xs = np.arange(xmin, (xmax + xstep), xstep)
ys = np.arange(ymin, (ymax + ystep), ystep)
z = rastrigin(torch.tensor([xs,ys]), A=1.0).numpy()
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(xs, z)
plt.tight_layout()
plt.savefig('rastrigin.png')
```
With $A=1.0$ the Rastrigin function has many small 'bumps', i.e. local minima (the plot above shows the function along the diagonal $x=y$). The main basin around $x=0$ is the global minimum we want our optimisers to reach.
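For a fuller picture of the landscape, the function can also be evaluated on a 2-D grid and shown as a contour plot; a minimal sketch reusing the `rastrigin` function and the `xs`, `ys` ranges defined above:
```
# contour plot of the same Rastrigin landscape (A=1.0) over a 2-D grid
XX, YY = np.meshgrid(xs, ys)
ZZ = rastrigin(torch.tensor(np.stack([XX, YY])), A=1.0).numpy()

fig, ax = plt.subplots(figsize=(6, 5))
c = ax.contourf(XX, YY, ZZ, levels=40)
fig.colorbar(c, ax=ax)
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.tight_layout()
plt.savefig('rastrigin_contour.png')
```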
```
p_SGD = torch.tensor([5.0, 5.0], requires_grad=True, device=device)
p_SGD_Mom = torch.tensor([5.0, 5.0], requires_grad=True, device=device)
p_Adagrad = torch.tensor([5.0, 5.0], requires_grad=True, device=device)
p_Adam = torch.tensor([5.0, 5.0], requires_grad=True, device=device)
print('Parameters initialised:\n ', p_SGD, p_SGD.type())
epochs = 100
print('Max epochs:\n ', epochs)
A = 1.0
opt_SGD = torch.optim.SGD([p_SGD], lr=0.01)
print(f'Initialised SGD:\n Learning rate:{0.01}')
opt_SGD_Mom = torch.optim.SGD([p_SGD_Mom], lr=0.01, momentum=0.09)
print(f'Initialised SGD Momentum:\n Learning rate:{0.01}, Momentum:{0.09}')
opt_Adagrad = torch.optim.Adagrad([p_Adagrad], lr=0.01)
print(f'Initialised Adagrad:\n Learning rate:{0.01}')
opt_Adam = torch.optim.Adam([p_Adam], lr=0.01)
print(f'Initialised Adam:\n Learning rate:{0.01}')
plt_loss_SGD = []
plt_loss_SGD_Mom = []
plt_loss_Adagrad = []
plt_loss_Adam = []
for epoch in range(epochs):
# zero gradients
opt_SGD.zero_grad()
opt_SGD_Mom.zero_grad()
opt_Adagrad.zero_grad()
opt_Adam.zero_grad()
# compute loss
loss_SGD = rastrigin(p_SGD, A=A)
loss_SGD_Mom = rastrigin(p_SGD_Mom, A=A)
loss_Adagrad = rastrigin(p_Adagrad, A=A)
loss_Adam = rastrigin(p_Adam, A=A)
# backprop
loss_SGD.backward()
loss_SGD_Mom.backward()
loss_Adagrad.backward()
loss_Adam.backward()
# step optimiser
opt_SGD.step()
opt_SGD_Mom.step()
opt_Adagrad.step()
opt_Adam.step()
# store loss for plots
plt_loss_SGD.append(loss_SGD.item())
plt_loss_SGD_Mom.append(loss_SGD_Mom.item())
plt_loss_Adagrad.append(loss_Adagrad.item())
plt_loss_Adam.append(loss_Adam.item())
print(f'Loss function:\n Rastrigin, A={A}')
fig, ax = plt.subplots(figsize=(8,5))
ax.plot(plt_loss_SGD, label='SGD', linewidth=2, alpha=.6)
ax.plot(plt_loss_SGD_Mom, label='SGD Momentum', linewidth=2, alpha=.6)
ax.plot(plt_loss_Adagrad, label='Adagrad', linewidth=2, alpha=.6)
ax.plot(plt_loss_Adam, label='Adam', linewidth=2, alpha=.6)
ax.legend()
ax.set_xlabel('Epoch')
ax.set_ylabel('Loss')
plt.tight_layout()
plt.savefig('optimiser_comparison_rastrigin.png')
```
Rastrigin is a difficult function to optimise as it's filled with many local minima, but there is only one global minimum. We find that SGD + Momentum shows the best performance when applied to a 2D Rastrigin function with $A=1.0$ (a parameter which determines how 'bumpy' the function is).
# Optimisation of a SVM on real data
We apply a soft-margin SVM to the Iris data and optimise its parameters using gradient descent.
Note: we will only be using two of the three classes from the dataset.
An SVM tries to find the maximum margin hyperplane which separates the data classes. For a soft margin SVM
where $\textbf{x}$ is our data, we minimize:
\begin{equation}
\left[\frac 1 n \sum_{i=1}^n \max\left(0, 1 - y_i(\textbf{w}\cdot \textbf{x}_i - b)\right) \right] + \lambda\lVert \textbf{w} \rVert^2
\end{equation}
We can formulate this as an optimization over our weights $\textbf{w}$ and bias $b$, where we minimize the
hinge loss subject to an $\ell_2$ weight decay term. The hinge loss for some model outputs
$z = \textbf{w}\textbf{x} + b$ with targets $y$ is given by:
\begin{equation}
\ell(y,z) = \max\left(0, 1 - yz \right)
\end{equation}
```
import torch
import pandas as pd
from tqdm.auto import tqdm
import matplotlib.pyplot as plt
def svm(x,w,b):
h = (w*x).sum(1) + b
return h
def hinge_loss(z, y):
yz = y * z
return torch.max(torch.zeros_like(yz), (1-yz))
# test
print(hinge_loss(torch.randn(2), torch.randn(2) > 0).float())
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
df = pd.read_csv(url, header=None)
df = df.sample(frac=1, random_state=0) # shuffle
df = df[df[4].isin(['Iris-virginica', 'Iris-versicolor'])] # filter
# add label indices column
mapping = {k: v for v, k in enumerate(df[4].unique())}
df[5] = (2 * df[4].map(mapping)) - 1 # labels in {-1,1}
# normalise data
alldata = torch.tensor(df.iloc[:, [0,1,2,3]].values, dtype=torch.float)
alldata = (alldata - alldata.mean(dim=0)) / alldata.var(dim=0)
# create datasets
targets_tr = torch.tensor(df.iloc[:75, 5].values, dtype=torch.long)
targets_va = torch.tensor(df.iloc[75:, 5].values, dtype=torch.long)
data_tr = alldata[:75]
data_va = alldata[75:]
from torch.utils import data
# mini-batch training data
dataset_tr = data.TensorDataset(data_tr, targets_tr)
dataloader_tr = data.DataLoader(dataset_tr, batch_size=25, shuffle=True)
# mini-batch test data
dataset_va = data.TensorDataset(data_va, targets_va)
dataloader_va = data.DataLoader(dataset_va, batch_size=25, shuffle=True)
def eval_accuracy(predictions, labels):
a = sum(predictions.detach().numpy() * labels.numpy() >=0) / len(labels)
return a
def svm_train(train_dataloader, data_va, targets_va, epochs=100, lr=0.01, decay=0.01, use_adam=False):
print('Support Vector Machine:')
print(' learning_rate:', lr)
print(' epochs:', epochs)
train_rows, train_col = train_dataloader.dataset.tensors[0].shape
print(f' X_train.shape: ({train_rows},{train_col})')
# train_y_rows = dataloader.dataset.tensors[1].shape[0]
# print(f' y_train.shape: ({train_y_rows})')
# test_rows, test_col = test_dataloader.dataset.tensors[0].shape
# print(f' X_test.shape: ({train_rows},{train_col})')
# test_y_rows = dataloader.dataset.tensors[1].shape[0]
# print(f' y_test.shape: ({test_y_rows})')
print('---------------------------------')
# initialise weights and biases
w = torch.randn(1, train_col, requires_grad=True)
b = torch.randn(1, requires_grad=True)
    # choose the optimiser: SGD by default, Adam when use_adam=True
    if use_adam:
        optimiser = torch.optim.Adam([w, b], lr=lr, weight_decay=decay)
    else:
        optimiser = torch.optim.SGD([w, b], lr=lr, weight_decay=decay)
# record loss over epoch
losses = []
# record training accuracy over epoch
train_accuracy = []
# record validation accuracy over epoch
test_accuracy = []
print('Training model, please wait...')
for epoch in tqdm(range(epochs)):
ep_loss = 0
for train_batch in train_dataloader:
# zero gradients
optimiser.zero_grad()
# compute loss
X_train, y_train = train_batch
y_pred = svm(X_train, w, b)
loss = hinge_loss(y_pred, y_train).mean()
# backprop
loss.backward()
# step optimiser
optimiser.step()
# track loss
ep_loss += loss.item()
losses.append(ep_loss)
# training accuracy
ep_train_pred = svm(X_train, w, b)
ep_train_acc = eval_accuracy(ep_train_pred, y_train)
train_accuracy.append(ep_train_acc)
# validation accuracy
ep_test_pred = svm(data_va, w, b)
ep_test_acc = eval_accuracy(ep_test_pred, targets_va)
test_accuracy.append(ep_test_acc)
print(f'Training accuracy: {train_accuracy[-1]*100}%')
print(f'Validation accuracy: {ep_test_acc*100}%')
print(f'Train loss: {losses[-1]}')
return train_accuracy, test_accuracy, losses
tr_acc, va_acc, loss = svm_train(dataloader_tr, data_va, targets_va,
epochs=100, lr=0.01, decay=0.01)
# SGD training accuracy
fig, ax = plt.subplots(figsize=(6,3))
ax.plot(range(len(tr_acc)), tr_acc, label='Training Accuracy (SGD)')
ax.plot(range(len(va_acc)), va_acc, label='Validation Accuracy (SGD)')
ax.set_xlabel('Epoch')
ax.set_ylabel('Accuracy')
ax.grid(True)
ax.legend(loc='best', frameon=True)
plt.tight_layout()
plt.savefig('sgd_tr_va_acc.png')
# Adam run (use_adam=True switches svm_train to the Adam optimiser)
tr_acc_Adam, va_acc_Adam, loss_Adam = svm_train(dataloader_tr, data_va, targets_va,
                                 epochs=100, lr=0.01, decay=0.01, use_adam=True)
# Adam training accuracy
fig, ax = plt.subplots(figsize=(6,3))
ax.plot(range(len(tr_acc_Adam)), tr_acc_Adam, label='Training Accuracy (Adam)')
ax.plot(range(len(va_acc_Adam)), va_acc_Adam, label='Validation Accuracy (Adam)')
ax.set_xlabel('Epoch')
ax.set_ylabel('Accuracy')
ax.grid(True)
ax.legend(loc='best', frameon=True)
plt.tight_layout()
plt.savefig('adam_tr_va_acc.png')
# reuse the SGD results from the first run for the comparison plots below
tr_acc_SGD, va_acc_SGD, loss_SGD = tr_acc, va_acc, loss
# SGD vs Adam validation accuracy
fig, ax = plt.subplots(figsize=(6,3))
ax.plot(range(len(va_acc)), va_acc_SGD, label='Validation Accuracy (SGD)')
ax.plot(range(len(va_acc)), va_acc_Adam, label='Validation Accuracy (Adam)')
ax.set_xlabel('Epoch')
ax.set_ylabel('Accuracy')
ax.grid(True)
ax.legend(loc='best', frameon=True)
plt.tight_layout()
plt.savefig('sgd_vs_adam_acc_1.png')
```
SGD makes bigger jumps in accuracy whilst Adam makes smaller ones. This results in SGD obtaining higher final validation accuracy.
```
fig, ax = plt.subplots(figsize=(6, 3))
ax.plot(range(len(loss)), loss_SGD, label='Training Loss (SGD)')
ax.plot(range(len(loss)), loss_Adam, label='Training Loss (Adam)')
ax.set_xlabel('Epoch')
ax.set_ylabel('Loss')
ax.grid(True)
ax.legend(loc='best', frameon=True)
plt.tight_layout()
plt.savefig('sgd_vs_adam_loss.png')
```
Adam's loss starts off at a very large value and it does a good job of minimising it such that it matches the superior performance of SGD.
|
github_jupyter
|
import math
import torch
import numpy as np
import matplotlib.pyplot as plt
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def rastrigin(X, A=1.0):
return A*2 + ( (X[0]**2 - A*torch.cos(2*math.pi*X[0])) + (X[1]**2 - A*torch.cos(2*math.pi*X[1])) )
xmin, xmax, xstep = -5, 5, .2
ymin, ymax, ystep = -5, 5, .2
xs = np.arange(xmin, (xmax + xstep), xstep)
ys = np.arange(ymin, (ymax + ystep), ystep)
z = rastrigin(torch.tensor([xs,ys]), A=1.0).numpy()
fig, ax = plt.subplots(figsize=(8,6))
ax.plot(xs, z)
plt.tight_layout()
plt.savefig('rastrigin.png')
p_SGD = torch.tensor([5.0, 5.0], requires_grad=True, device=device)
p_SGD_Mom = torch.tensor([5.0, 5.0], requires_grad=True, device=device)
p_Adagrad = torch.tensor([5.0, 5.0], requires_grad=True, device=device)
p_Adam = torch.tensor([5.0, 5.0], requires_grad=True, device=device)
print('Parameters initialised:\n ', p_SGD, p_SGD.type())
epochs = 100
print('Max epochs:\n ', epochs)
A = 1.0
opt_SGD = torch.optim.SGD([p_SGD], lr=0.01)
print(f'Initialised SGD:\n Learning rate:{0.01}')
opt_SGD_Mom = torch.optim.SGD([p_SGD_Mom], lr=0.01, momentum=0.09)
print(f'Initialised SGD Momentum:\n Learning rate:{0.01}, Momentum:{0.09}')
opt_Adagrad = torch.optim.Adagrad([p_Adagrad], lr=0.01)
print(f'Initialised Adagrad:\n Learning rate:{0.01}')
opt_Adam = torch.optim.Adam([p_Adam], lr=0.01)
print(f'Initialised Adam:\n Learning rate:{0.01}')
plt_loss_SGD = []
plt_loss_SGD_Mom = []
plt_loss_Adagrad = []
plt_loss_Adam = []
for epoch in range(epochs):
# zero gradients
opt_SGD.zero_grad()
opt_SGD_Mom.zero_grad()
opt_Adagrad.zero_grad()
opt_Adam.zero_grad()
# compute loss
loss_SGD = rastrigin(p_SGD, A=A)
loss_SGD_Mom = rastrigin(p_SGD_Mom, A=A)
loss_Adagrad = rastrigin(p_Adagrad, A=A)
loss_Adam = rastrigin(p_Adam, A=A)
# backprop
loss_SGD.backward()
loss_SGD_Mom.backward()
loss_Adagrad.backward()
loss_Adam.backward()
# step optimiser
opt_SGD.step()
opt_SGD_Mom.step()
opt_Adagrad.step()
opt_Adam.step()
# store loss for plots
plt_loss_SGD.append(loss_SGD.item())
plt_loss_SGD_Mom.append(loss_SGD_Mom.item())
plt_loss_Adagrad.append(loss_Adagrad.item())
plt_loss_Adam.append(loss_Adam.item())
print(f'Loss function:\n Rastrigin, A={A}')
fig, ax = plt.subplots(figsize=(8,5))
ax.plot(plt_loss_SGD, label='SGD', linewidth=2, alpha=.6)
ax.plot(plt_loss_SGD_Mom, label='SGD Momentum', linewidth=2, alpha=.6)
ax.plot(plt_loss_Adagrad, label='Adagrad', linewidth=2, alpha=.6)
ax.plot(plt_loss_Adam, label='Adam', linewidth=2, alpha=.6)
ax.legend()
ax.set_xlabel('Epoch')
ax.set_ylabel('Loss')
plt.tight_layout()
plt.savefig('optimiser_comparison_rastrigin.png')
import torch
import pandas as pd
from tqdm.auto import tqdm
import matplotlib.pyplot as plt
def svm(x,w,b):
h = (w*x).sum(1) + b
return h
def hinge_loss(z, y):
yz = y * z
return torch.max(torch.zeros_like(yz), (1-yz))
# test
print(hinge_loss(torch.randn(2), torch.randn(2) > 0).float())
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
df = pd.read_csv(url, header=None)
df = df.sample(frac=1, random_state=0) # shuffle
df = df[df[4].isin(['Iris-virginica', 'Iris-versicolor'])] # filter
# add label indices column
mapping = {k: v for v, k in enumerate(df[4].unique())}
df[5] = (2 * df[4].map(mapping)) - 1 # labels in {-1,1}
# normalise data
alldata = torch.tensor(df.iloc[:, [0,1,2,3]].values, dtype=torch.float)
alldata = (alldata - alldata.mean(dim=0)) / alldata.var(dim=0)
# create datasets
targets_tr = torch.tensor(df.iloc[:75, 5].values, dtype=torch.long)
targets_va = torch.tensor(df.iloc[75:, 5].values, dtype=torch.long)
data_tr = alldata[:75]
data_va = alldata[75:]
from torch.utils import data
# mini-batch training data
dataset_tr = data.TensorDataset(data_tr, targets_tr)
dataloader_tr = data.DataLoader(dataset_tr, batch_size=25, shuffle=True)
# mini-batch test data
dataset_va = data.TensorDataset(data_va, targets_va)
dataloader_va = data.DataLoader(dataset_va, batch_size=25, shuffle=True)
def eval_accuracy(predictions, labels):
a = sum(predictions.detach().numpy() * labels.numpy() >=0) / len(labels)
return a
def svm_train(train_dataloader, data_va, targets_va, epochs=100, lr=0.01, decay=0.01):
print('Support Vector Machine:')
print(' learning_rate:', lr)
print(' epochs:', epochs)
train_rows, train_col = train_dataloader.dataset.tensors[0].shape
print(f' X_train.shape: ({train_rows},{train_col})')
# train_y_rows = dataloader.dataset.tensors[1].shape[0]
# print(f' y_train.shape: ({train_y_rows})')
# test_rows, test_col = test_dataloader.dataset.tensors[0].shape
# print(f' X_test.shape: ({train_rows},{train_col})')
# test_y_rows = dataloader.dataset.tensors[1].shape[0]
# print(f' y_test.shape: ({test_y_rows})')
print('---------------------------------')
# initialise weights and biases
w = torch.randn(1, train_col, requires_grad=True)
b = torch.randn(1, requires_grad=True)
optimiser = torch.optim.SGD([w,b], lr=lr, weight_decay=decay)
# optimiser = torch.optim.Adam([w,b], lr=lr, weight_decay=decay)
# record loss over epoch
losses = []
# record training accuracy over epoch
train_accuracy = []
# record validation accuracy over epoch
test_accuracy = []
print('Training model, please wait...')
for epoch in tqdm(range(epochs)):
ep_loss = 0
for train_batch in train_dataloader:
# zero gradients
optimiser.zero_grad()
# compute loss
X_train, y_train = train_batch
y_pred = svm(X_train, w, b)
loss = hinge_loss(y_pred, y_train).mean()
# backprop
loss.backward()
# step optimiser
optimiser.step()
# track loss
ep_loss += loss.item()
losses.append(ep_loss)
# training accuracy
ep_train_pred = svm(X_train, w, b)
ep_train_acc = eval_accuracy(ep_train_pred, y_train)
train_accuracy.append(ep_train_acc)
# validation accuracy
ep_test_pred = svm(data_va, w, b)
ep_test_acc = eval_accuracy(ep_test_pred, targets_va)
test_accuracy.append(ep_test_acc)
print(f'Training accuracy: {train_accuracy[-1]*100}%')
print(f'Validation accuracy: {ep_test_acc*100}%')
print(f'Train loss: {losses[-1]}')
return train_accuracy, test_accuracy, losses
tr_acc, va_acc, loss = svm_train(dataloader_tr, data_va, targets_va,
epochs=100, lr=0.01, decay=0.01)
# SGD training accuracy
fig, ax = plt.subplots(figsize=(6,3))
ax.plot(range(len(tr_acc)), tr_acc, label='Training Accuracy (SGD)')
ax.plot(range(len(va_acc)), va_acc, label='Validation Accuracy (SGD)')
ax.set_xlabel('Epoch')
ax.set_ylabel('Accuracy')
ax.grid(True)
ax.legend(loc='best', frameon=True)
plt.tight_layout()
plt.savefig('sgd_tr_va_acc.png')
# Adam training accuracy
fig, ax = plt.subplots(figsize=(6,3))
ax.plot(range(len(tr_acc)), tr_acc, label='Training Accuracy (Adam)')
ax.plot(range(len(va_acc)), va_acc, label='Validation Accuracy (Adam)')
ax.set_xlabel('Epoch')
ax.set_ylabel('Accuracy')
ax.grid(True)
ax.legend(loc='best', frameon=True)
plt.tight_layout()
plt.savefig('adam_tr_va_acc.png')
tr_acc_SGD, va_acc_SGD, loss_SGD = svm_train(dataloader_tr, data_va, targets_va,
epochs=100, lr=0.01, decay=0.01)
tr_acc_Adam, va_acc_Adam, loss_Adam = svm_train(dataloader_tr, data_va, targets_va,
epochs=100, lr=0.01, decay=0.01)
# SGD vs Adam validation accuracy
fig, ax = plt.subplots(figsize=(6,3))
ax.plot(range(len(va_acc)), va_acc_SGD, label='Validation Accuracy (SGD)')
ax.plot(range(len(va_acc)), va_acc_Adam, label='Validation Accuracy (Adam)')
ax.set_xlabel('Epoch')
ax.set_ylabel('Accuracy')
ax.grid(True)
ax.legend(loc='best', frameon=True)
plt.tight_layout()
plt.savefig('sgd_vs_adam_acc_1.png')
fig, ax = plt.subplots(figsize=(6, 3))
ax.plot(range(len(loss)), loss_SGD, label='Training Loss (SGD)')
ax.plot(range(len(loss)), loss_Adam, label='Training Loss (Adam)')
ax.set_xlabel('Epoch')
ax.set_ylabel('Loss')
ax.grid(True)
ax.legend(loc='best', frameon=True)
plt.tight_layout()
plt.savefig('sgd_vs_adam_loss.png')
| 0.702938 | 0.895477 |
```
import numpy as np
a=[1,2,3]
b=np.array(a)
print(b)
a+[5]
2*b
print(np.sqrt(a))
print(np.log(a))
print(np.exp(a))
k=np.array([3,4,5])
print(k)
dot=0
for e,f in zip(a,k):
print(e,f)
dot+=(e*f)
print(dot)
print(a*k)
print("Hello")
import numpy as np
M =np.array([[1,2,3] ,[3,4,5]])
print(M)
m2=np.matrix(M)
print(m2)
m2 = np.matrix([[1,2],[3,4]])
print(m2)
print(np.matrix([[1,2],[3,4]]))
M.T
np.zeros(10)
np.zeros(10).T
(np.zeros(10)).T
np.random.random((10,10))
np.random.random((10,10,10))
np.random.random(10)
G=np.random.randn(10,10)
print('mean: ',G.mean(),"\n","Variance: ",G.var())
A=np.random.random((2,2))*20
print(A)
Ainv = np.linalg.inv(A)
print(Ainv)
Ainv.dot(A)
a=np.array([1,2])
b=np.array([3,4])
np.outer(a,b)
np.inner(a,b)
X=np.random.randn(10,3)
cov = np.cov(X)
print(cov.shape)
cov = np.cov(X.T)
#cov.shape
print(cov.shape)
cov
np.linalg.eigh(cov)
A=[[1,2,3],[4,5,6],[7,8,9]]
B=[10,11,12]
print(A,"\n",B)
x=np.linalg.inv(A).dot(B)
print(x)
x=np.linalg.solve(A,B)
print(x)
x=np.linalg.solve(A,B)
print(x)
number=[[1,2] ,[1.5 ,4]]
price=[2200,5050]
x=np.linalg.solve(number,price)
print(x)
x=np.linalg.inv(A).dot(B)
print(x)
number=np.array([[1,1] ,[1.5 ,4]])
price=np.array([2200,5050])
x=np.linalg.solve(number,price)
print(x)
randData=np.random.randn(100,3)*100
print(randData)
randData=np.array(np.random.randn(100,3)*100)
print(randData)
X=[]
for line in open("data.csv"):
row = line.split(',')
    sample = list(map(float, row))  # list() needed in Python 3, map returns an iterator
#print(sample)
X.append(sample)
print(X)
import pandas as pd
X=pd.read_csv("data.csv",header=None)
print('Worked')
print(X)
type(X)
X.info()
X.head(10)
import pandas as pd
X=pd.read_csv("data.csv",header=None)
print(X)
X.info()
M = X.values  # .as_matrix() was removed in recent pandas versions
type(M)
X[0]
X.iloc[0]
X[[0,2]]
X[ X[0]<=5]
X[0]<5
data =pd.read_csv("data2.csv",engine="python",skipfooter=3)
data.columns
data.columns= ['Number','Names']
data.columns
data.Number
data.Names
data['other']=1
data.columns
data.head()
data[1:20]
from datetime import datetime
datetime.strptime("1949-05","%Y-%m")
import matplotlib.pyplot as plt
x = np.linspace(0,10,100)
y=np.sin(x)
plt.plot(x,y)
plt.xlabel("X")
plt.ylabel("Sin(X)");plt.title("Sine Wave Graph")
plt.show()
A=pd.read_csv("data3.csv").values
x=A[:,0]
y=A[:,1]
plt.plot(x,y)
plt.plot(x,y);
plt.scatter(x,y);
plt.show()
plt.hist(x)
R=np.random.rand(40)
plt.hist(R)
R=np.random.rand(40)
plt.hist(R,bins=50)
df = pd.read_csv("train.csv")
M = df.values  # .as_matrix() was removed in recent pandas versions
im=M[0,1:]
im.shape
df.shape
im=im.reshape(28,28)
plt.imshow(im)
plt.imshow(im, cmap='gray')
plt.imshow(255-im, cmap='gray')
plt.imshow(255-im)
from scipy.stats import norm
print(norm.pdf(0))
norm.pdf(0,loc=5,scale=10)
r=np.random.rand(10)
norm.pdf(r)
norm.logpdf(r)
norm.cdf(r)
norm.logcdf(r)
A=np.random.randn(10000)
plt.hist(A,bins=1000)
A= 10*A + 5
plt.hist(A,bins=500)
r=np.random.randn(10000,2)
plt.scatter(r[:,0],r[:,1])
r[:1]=5*r[:1]+2
plt.scatter(r[:,0],r[:,1])
plt.scatter(r[:,0],r[:,1])
plt.axis('equal')
cov=np.array([[1,0.8],[0.8,3]])
from scipy.stats import multivariate_normal as mvn
mu =np.array([0,2])
r= mvn.rvs(mean=mu, cov=cov, size=1000)
plt.scatter(r[:,0],r[:,1])
plt.axis('equal')
r=np.random.multivariate_normal(mean=mu,cov=cov,size=1000)
plt.scatter(r[:,0],r[:,1])
plt.axis('equal')
x=np.linspace(0,100,10000)
y=np.sin(x)+np.sin(3*x)+np.sin(5*x)
plt.plot(y)
y=np.fft.fft(y)
plt.plot(np.abs(y))
```
|
github_jupyter
|
import numpy as np
a=[1,2,3]
b=np.array(a)
print(b)
a+[5]
2*b
print(np.sqrt(a))
print(np.log(a))
print(np.exp(a))
k=np.array([3,4,5])
print(k)
dot=0
for e,f in zip(a,k):
print(e,f)
dot+=(e*f)
print(dot)
print(a*k)
print("Hello")
import numpy as np
M =np.array([[1,2,3] ,[3,4,5]])
print(M)
m2=np.matrix(M)
print(m2)
m2 = np.matrix([[1,2],[3,4]])
print(m2)
print(np.matrix([[1,2],[3,4]]))
M.T
np.zeros(10)
np.zeros(10).T
(np.zeros(10)).T
np.random.random((10,10))
np.random.random((10,10,10))
np.random.random(10)
G=np.random.randn(10,10)
print('mean: ',G.mean(),"\n","Variance: ",G.var())
A=np.random.random((2,2))*20
print(A)
Ainv = np.linalg.inv(A)
print(Ainv)
Ainv.dot(A)
a=np.array([1,2])
b=np.array([3,4])
np.outer(a,b)
np.inner(a,b)
X=np.random.randn(10,3)
cov = np.cov(X)
print(cov.shape)
cov = np.cov(X.T)
#cov.shape
print(cov.shape)
cov
np.linalg.eigh(cov)
A=[[1,2,3],[4,5,6],[7,8,9]]
B=[10,11,12]
print(A,"\n",B)
x=np.linalg.inv(A).dot(B)
print(x)
x=np.linalg.solve(A,B)
print(x)
x=np.linalg.solve(A,B)
print(x)
number=[[1,2] ,[1.5 ,4]]
price=[2200,5050]
x=np.linalg.solve(number,price)
print(x)
x=np.linalg.inv(A).dot(B)
print(x)
number=np.array([[1,1] ,[1.5 ,4]])
price=np.array([2200,5050])
x=np.linalg.solve(number,price)
print(x)
randData=np.random.randn(100,3)*100
print(randData)
randData=np.array(np.random.randn(100,3)*100)
print(randData)
X=[]
for line in open("data.csv"):
row = line.split(',')
sample=map(float, row)
#print(sample)
X.append(sample)
print(X)
import pandas as pd
X=pd.read_csv("data.csv",header=None)
print('Worked')
print(X)
type(X)
X.info()
X.head(10)
import pandas as pd
X=pd.read_csv("data.csv",header=None)
print(X)
X.info()
M= X.as_matrix()
type(M)
X[0]
X.iloc[0]
X[[0,2]]
X[ X[0]<=5]
X[0]<5
data =pd.read_csv("data2.csv",engine="python",skipfooter=3)
data.columns
data.columns= ['Number','Names']
data.columns
data.Number
data.Names
data['other']=1
data.columns
data.head()
data[1:20]
from datetime import datetime
datetime.strptime("1949-05","%Y-%m")
import matplotlib.pyplot as plt
x = np.linspace(0,10,100)
y=np.sin(x)
plt.plot(x,y)
plt.xlabel("X")
plt.ylabel("Sin(X)");plt.title("Sine Wave Graph")
plt.show()
A=pd.read_csv("data3.csv").values
x=A[:,0]
y=A[:,1]
plt.plot(x,y)
plt.plot(x,y);
plt.scatter(x,y);
plt.show()
plt.hist(x)
R=np.random.rand(40)
plt.hist(R)
R=np.random.rand(40)
plt.hist(R,bins=50)
df = pd.read_csv("train.csv")
M= df.as_matrix()
im=M[0,1:]
im.shape
df.shape
im=im.reshape(28,28)
plt.imshow(im)
plt.imshow(im, cmap='gray')
plt.imshow(255-im, cmap='gray')
plt.imshow(255-im)
from scipy.stats import norm
print(norm.pdf(0))
norm.pdf(0,loc=5,scale=10)
r=np.random.rand(10)
norm.pdf(r)
norm.logpdf(r)
norm.cdf(r)
norm.logcdf(r)
A=np.random.randn(10000)
plt.hist(A,bins=1000)
A= 10*A + 5
plt.hist(A,bins=500)
r=np.random.randn(10000,2)
plt.scatter(r[:,0],r[:,1])
r[:1]=5*r[:1]+2
plt.scatter(r[:,0],r[:,1])
plt.scatter(r[:,0],r[:,1])
plt.axis('equal')
cov=np.array([[1,0.8],[0.8,3]])
from scipy.stats import multivariate_normal as mvn
mu =np.array([0,2])
r= mvn.rvs(mean=mu, cov=cov, size=1000)
plt.scatter(r[:,0],r[:,1])
plt.axis('equal')
r=np.random.multivariate_normal(mean=mu,cov=cov,size=1000)
plt.scatter(r[:,0],r[:,1])
plt.axis('equal')
x=np.linspace(0,100,10000)
y=np.sin(x)+np.sin(3*x)+np.sin(5*x)
plt.plot(y)
y=np.fft.fft(y)
plt.plot(np.abs(y))
| 0.225246 | 0.507446 |
<!-- dom:TITLE: Computational Physics Lectures: Numerical integration, from Newton-Cotes quadrature to Gaussian quadrature -->
# Computational Physics Lectures: Numerical integration, from Newton-Cotes quadrature to Gaussian quadrature
<!-- dom:AUTHOR: Morten Hjorth-Jensen at Department of Physics, University of Oslo & Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University -->
<!-- Author: -->
**Morten Hjorth-Jensen**, Department of Physics, University of Oslo and Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University
Date: **Aug 23, 2017**
Copyright 1999-2017, Morten Hjorth-Jensen. Released under CC Attribution-NonCommercial 4.0 license
## Numerical Integration
Here we will discuss some of the classical methods for integrating a function. The methods we discuss are
1. Equal step methods like the trapezoidal, rectangular and Simpson's rule, parts of what are called Newton-Cotes quadrature methods.
2. Integration approaches based on Gaussian quadrature.
The latter are more suitable
for the case where the abscissas are not equally spaced.
We emphasize methods for evaluating few-dimensional (typically up to four dimensions) integrals. Multi-dimensional integrals will be discussed in connection with Monte Carlo methods.
## Newton-Cotes Quadrature or equal-step methods
The integral
<!-- Equation labels as ordinary links -->
<div id="eq:integraldef"></div>
$$
\begin{equation}
I=\int_a^bf(x) dx
\label{eq:integraldef} \tag{1}
\end{equation}
$$
has a very simple meaning. The integral is the
area enclosed by the function $f(x)$ from $x=a$ to $x=b$. It is subdivided into several smaller areas whose evaluation is to be approximated by different techniques. The areas under the curve can for example be approximated by rectangular boxes or trapezoids.
<!-- !split -->
## Basic philosophy of equal-step methods
In considering equal step methods, our basic approach is that of approximating
a function $f(x)$ with a polynomial of at most
degree $N-1$, given $N$ integration points. If our polynomial is of degree $1$,
the function will be approximated with $f(x)\approx a_0+a_1x$.
<!-- !split -->
## Simple algorithm for equal step methods
The algorithm for these integration methods is rather simple, and the number of approximations perhaps unlimited!
* Choose a step size $h=(b-a)/N$ where $N$ is the number of steps and $a$ and $b$ the lower and upper limits of integration.
* With a given step length we rewrite the integral as
$$
\int_a^bf(x) dx= \int_a^{a+h}f(x)dx + \int_{a+h}^{a+2h}f(x)dx+\dots \int_{b-h}^{b}f(x)dx.
$$
* The strategy then is to find a reliable polynomial approximation for $f(x)$ in the various intervals. Choosing a given approximation for $f(x)$, we obtain a specific approximation to the integral.
* With this approximation to $f(x)$ we perform the integration by computing the integrals over all subintervals, as in the small sketch below.
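To make the splitting concrete, here is a minimal Python sketch of the simplest such choice, the rectangle (midpoint) rule, which approximates every subintegral by $h$ times the value of $f$ at the midpoint of the subinterval (the rules derived below refine this local approximation):
```
import numpy as np

def composite_midpoint(f, a, b, N):
    """Split [a, b] into N equal subintervals and approximate each
    subintegral by h*f(midpoint) (the rectangle/midpoint rule)."""
    h = (b - a) / N
    midpoints = a + h * (np.arange(N) + 0.5)
    return h * np.sum(f(midpoints))

# example: 4*int_0^1 dx/(1+x^2) = pi
approx = composite_midpoint(lambda x: 4.0 / (1.0 + x * x), 0.0, 1.0, 1000)
print(approx, abs(approx - np.pi))
```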
<!-- !split -->
## Simple algorithm for equal step methods
One possible strategy then is to find a reliable polynomial expansion for $f(x)$ in the smaller
subintervals. Consider for example evaluating
$$
\int_a^{a+2h}f(x)dx,
$$
which we rewrite as
<!-- Equation labels as ordinary links -->
<div id="eq:hhint"></div>
$$
\begin{equation}
\int_a^{a+2h}f(x)dx=
\int_{x_0-h}^{x_0+h}f(x)dx.
\label{eq:hhint} \tag{2}
\end{equation}
$$
We have chosen a midpoint $x_0$ and have defined $x_0=a+h$.
<!-- !split -->
## Lagrange's interpolation formula
Using Lagrange's interpolation formula
$$
P_N(x)=\sum_{i=0}^{N}\prod_{k\ne i} \frac{x-x_k}{x_i-x_k}y_i,
$$
we could attempt to approximate the function $f(x)$ with a first-order polynomial in $x$ in the two
sub-intervals $x\in[x_0-h,x_0]$ and $x\in[x_0,x_0+h]$. A first order polynomial means simply that
we have for say the interval $x\in[x_0,x_0+h]$
$$
f(x)\approx P_1(x)=\frac{x-x_0}{(x_0+h)-x_0}f(x_0+h)+\frac{x-(x_0+h)}{x_0-(x_0+h)}f(x_0),
$$
and for the interval $x\in[x_0-h,x_0]$
$$
f(x)\approx P_1(x)=\frac{x-(x_0-h)}{x_0-(x_0-h)}f(x_0)+\frac{x-x_0}{(x_0-h)-x_0}f(x_0-h).
$$
<!-- !split -->
## Polynomial approximation
Having performed this subdivision and polynomial approximation,
one from $x_0-h$ to $x_0$ and the other from $x_0$ to $x_0+h$,
$$
\int_a^{a+2h}f(x)dx=\int_{x_0-h}^{x_0}f(x)dx+\int_{x_0}^{x_0+h}f(x)dx,
$$
we can easily calculate for example the second integral as
$$
\int_{x_0}^{x_0+h}f(x)dx\approx \int_{x_0}^{x_0+h}\left(\frac{x-x_0}{(x_0+h)-x_0}f(x_0+h)+\frac{x-(x_0+h)}{x_0-(x_0+h)}f(x_0)\right)dx.
$$
<!-- !split -->
## Simplifying the integral
This integral can be simplified to
$$
\int_{x_0}^{x_0+h}f(x)dx\approx \int_{x_0}^{x_0+h}\left(\frac{x-x_0}{h}f(x_0+h)-\frac{x-(x_0+h)}{h}f(x_0)\right)dx,
$$
resulting in
$$
\int_{x_0}^{x_0+h}f(x)dx=\frac{h}{2}\left(f(x_0+h) + f(x_0)\right)+O(h^3).
$$
Here we added the error made in approximating our integral
with a polynomial of degree $1$.
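This elementary integral can be checked symbolically; a small SymPy sketch, with the generic symbols $f_0$ and $f_h$ standing for $f(x_0)$ and $f(x_0+h)$:
```
from sympy import symbols, integrate, simplify, Rational

x, x0, h, f0, fh = symbols('x x0 h f0 fh')

# first-order Lagrange polynomial through (x0, f0) and (x0+h, fh)
P1 = (x - x0) / h * fh - (x - (x0 + h)) / h * f0

# integrate it over [x0, x0+h] and compare with h/2*(f0 + fh)
result = simplify(integrate(P1, (x, x0, x0 + h)))
print(result)                                             # h*(f0 + fh)/2
print(simplify(result - Rational(1, 2) * h * (f0 + fh)))  # 0
```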
<!-- !split -->
## The trapezoidal rule
The other integral gives
$$
\int_{x_0-h}^{x_0}f(x)dx=\frac{h}{2}\left(f(x_0) + f(x_0-h)\right)+O(h^3),
$$
and adding up we obtain
<!-- Equation labels as ordinary links -->
<div id="eq:trapez"></div>
$$
\begin{equation}
\int_{x_0-h}^{x_0+h}f(x)dx=\frac{h}{2}\left(f(x_0+h) + 2f(x_0) + f(x_0-h)\right)+O(h^3),
\label{eq:trapez} \tag{3}
\end{equation}
$$
which is the well-known trapezoidal rule. Concerning the error in the approximation made,
$O(h^3)=O((b-a)^3/N^3)$, you should note
that this is the local error. Since we are splitting the integral from
$a$ to $b$ in $N$ pieces, we will have to perform approximately $N$
such operations.
<!-- !split -->
## Global error
This means that the *global error* goes like $\approx O(h^2)$.
The composite trapezoidal rule then reads
<!-- Equation labels as ordinary links -->
<div id="eq:trapez1"></div>
$$
\begin{equation}
I=\int_a^bf(x) dx=h\left(f(a)/2 + f(a+h) +f(a+2h)+
\dots +f(b-h)+ f(b)/2\right),
\label{eq:trapez1} \tag{4}
\end{equation}
$$
with a global error which goes like $O(h^2)$.
Hereafter we use the shorthand notations $f_{-h}=f(x_0-h)$, $f_{0}=f(x_0)$
and $f_{h}=f(x_0+h)$.
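Equation (4) is straightforward to implement; a short vectorized NumPy sketch, together with a check that halving $h$ reduces the error by roughly a factor of four, as expected from the $O(h^2)$ global error:
```
import numpy as np

def trapezoidal(f, a, b, N):
    """Composite trapezoidal rule, eq. (4), with N subintervals."""
    x = np.linspace(a, b, N + 1)
    y = f(x)
    h = (b - a) / N
    return h * (0.5 * y[0] + y[1:-1].sum() + 0.5 * y[-1])

f = lambda x: 4.0 / (1.0 + x * x)   # exact integral on [0, 1] is pi
for N in (10, 20, 40, 80):
    err = abs(trapezoidal(f, 0.0, 1.0, N) - np.pi)
    print(f"N = {N:3d}   error = {err:.3e}")
```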
<!-- !split -->
## Error in the trapezoidal rule
The correct mathematical expression for the local error for the trapezoidal rule is
$$
\int_a^bf(x)dx -\frac{b-a}{2}\left[f(a)+f(b)\right]=-\frac{h^3}{12}f^{(2)}(\xi),
$$
and the global error reads
$$
\int_a^bf(x)dx -T_h(f)=-\frac{b-a}{12}h^2f^{(2)}(\xi),
$$
where $T_h$ is the trapezoidal result and $\xi \in [a,b]$.
<!-- !split -->
## Algorithm for the trapezoidal rule
The trapezoidal rule is easy to implement numerically
through the following simple algorithm
* Choose the number of mesh points $n$ and fix the step length $h=(b-a)/n$.
* Calculate $f(a)$ and $f(b)$ and multiply by $h/2$.
* Perform a loop from $j=1$ to $n-1$ ($f(a)$ and $f(b)$ are known) and sum up the terms $f(a+h) +f(a+2h)+f(a+3h)+\dots +f(b-h)$. Each step in the loop corresponds to a given value $a+jh$.
* Multiply the final sum by $h$ and add $hf(a)/2$ and $hf(b)/2$.
<!-- !split -->
## Code example
A simple function which implements this algorithm is as follows
```
double TrapezoidalRule(double a, double b, int n, double (*func)(double))
{
  double TrapezSum;
  double fa, fb, x, step;
  int j;
  step=(b-a)/((double) n);
  fa=(*func)(a)/2. ;
  fb=(*func)(b)/2. ;
  TrapezSum=0.;
  for (j=1; j <= n-1; j++){
    x=j*step+a;
    TrapezSum+=(*func)(x);
  }
  TrapezSum=(TrapezSum+fb+fa)*step;
  return TrapezSum;
}  // end TrapezoidalRule
```
The function returns the value of the integral through the variable **TrapezSum**.
<!-- !split -->
## Transfer of function names
There is one new feature to note here, namely
the transfer of a user defined function called **func** in the
definition
```
void TrapezoidalRule(double a, double b, int n, double *TrapezSum, double (*func)(double) )
```
What happens here is that we are transferring a pointer to the name
of a user defined function, which takes a double precision variable as input and returns
a double precision number. This variant returns the result through the pointer **TrapezSum** rather than as a return value. The function **TrapezoidalRule** is then called as
```
TrapezoidalRule(a, b, n, &TrapezSum, &MyFunction );
```
in the calling function. We note that **a**, **b** and **n** are called by value,
while **TrapezSum** and the user defined function **MyFunction**
are called by reference.
## Going back to Python, why?
**Symbolic calculations and numerical calculations in one code!**
Python offers an extremely versatile programming environment, allowing for
the inclusion of analytical studies in a numerical program. Here we show an
example code with the **trapezoidal rule** using **SymPy** to evaluate an integral and compute the absolute error
with respect to the numerically evaluated one of the integral
$4\int_0^1 dx/(1+x^2) = \pi$:
```
from math import *
from sympy import *
def Trapez(a,b,f,n):
h = (b-a)/float(n)
s = 0
x = a
for i in range(1,n,1):
x = x+h
s = s+ f(x)
s = 0.5*(f(a)+f(b)) +s
return h*s
# function to compute pi
def function(x):
return 4.0/(1+x*x)
a = 0.0; b = 1.0; n = 100
result = Trapez(a,b,function,n)
print("Trapezoidal rule=", result)
# define x as a symbol to be used by sympy
x = Symbol('x')
exact = integrate(function(x), (x, 0.0, 1.0))
print("Sympy integration=", exact)
# Find relative error
print("Relative error", abs((exact-result)/exact))
```
## Error analysis
The following extended version of the trapezoidal rule allows you to plot the relative error by comparing with the exact result. By increasing to $10^8$ points one arrives at a region where numerical errors start to accumulate.
```
%matplotlib inline
from math import log10
import numpy as np
from sympy import Symbol, integrate
import matplotlib.pyplot as plt
# function for the trapezoidal rule
def Trapez(a,b,f,n):
h = (b-a)/float(n)
s = 0
x = a
for i in range(1,n,1):
x = x+h
s = s+ f(x)
s = 0.5*(f(a)+f(b)) +s
return h*s
# function to compute pi
def function(x):
return 4.0/(1+x*x)
# define integration limits
a = 0.0; b = 1.0;
# find result from sympy
# define x as a symbol to be used by sympy
x = Symbol('x')
exact = integrate(function(x), (x, a, b))
# set up the arrays for plotting the relative error
n = np.zeros(9); y = np.zeros(9);
# find the relative error as function of integration points
for i in range(1, 8, 1):
npts = 10**i
result = Trapez(a,b,function,npts)
RelativeError = abs((exact-result)/exact)
n[i] = log10(npts); y[i] = log10(RelativeError);
plt.plot(n,y, 'ro')
plt.xlabel('n')
plt.ylabel('Relative error')
plt.show()
```
## Integrating numerical mathematics with calculus
The last example shows the potential of combining numerical algorithms with
symbolic calculations, allowing us thereby to
* Validate and verify our algorithms.
* Including concepts like unit testing, one has the possibility to test and validate several or all parts of the code.
* Validation and verification are then included *naturally*.
* The above example allows you to test the mathematical error of the algorithm for the trapezoidal rule by changing the number of integration points. You get trained from day one to think error analysis.
<!-- !split -->
## The rectangle method
Another very simple approach is the so-called midpoint or rectangle method.
In this case the integration area is split in a given number of rectangles with length $h$ and height given by the mid-point value of the function. This gives the following simple rule for approximating an integral
<!-- Equation labels as ordinary links -->
<div id="eq:rectangle"></div>
$$
\begin{equation}
I=\int_a^bf(x) dx \approx h\sum_{i=1}^N f(x_{i-1/2}),
\label{eq:rectangle} \tag{5}
\end{equation}
$$
where $f(x_{i-1/2})$ is the midpoint value of $f$ for a given rectangle. We will discuss its truncation
error below. It is easy to implement this algorithm, as shown here
```
double RectangleRule(double a, double b, int n, double (*func)(double))
{
  double RectangleSum;
  double x, step;
  int j;
  step=(b-a)/((double) n);
  RectangleSum=0.;
  for (j = 0; j <= n-1; j++){
    x = (j+0.5)*step + a;        // midpoint of a given rectangle
    RectangleSum+=(*func)(x);    // add value of function
  }
  RectangleSum *= step;          // multiply with step length
  return RectangleSum;
}  // end RectangleRule
```
<!-- !split -->
## Truncation error for the rectangular rule
The correct mathematical expression for the local error for the rectangular rule $R_i(h)$ for element $i$ is
$$
\int_{-h}^hf(x)dx - R_i(h)=-\frac{h^3}{24}f^{(2)}(\xi),
$$
and the global error reads
$$
\int_a^bf(x)dx -R_h(f)=-\frac{b-a}{24}h^2f^{(2)}(\xi),
$$
where $R_h$ is the result obtained with rectangular rule and $\xi \in [a,b]$.
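As an illustration (a minimal Python sketch written for these notes, not part of the original program listings), the midpoint rule can be coded in a few lines; halving the step $h$ should reduce the error by roughly a factor of four, consistent with the $O(h^2)$ global error.
```
from math import pi

def MidpointRule(a, b, f, n):
    # sum the function values at the midpoints of the n subintervals
    h = (b - a)/float(n)
    s = 0.0
    for j in range(n):
        s += f(a + (j + 0.5)*h)
    return h*s

def function(x):
    return 4.0/(1.0 + x*x)

for n in (10, 20, 40):
    approx = MidpointRule(0.0, 1.0, function, n)
    print(n, approx, abs(approx - pi))
```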
<!-- !split -->
## Second-order polynomial
Instead of using the above first-order polynomial
approximations for $f$, we now attempt to use second-order polynomials.
In this case we need three points in order to define a second-order
polynomial approximation
$$
f(x) \approx P_2(x)=a_0+a_1x+a_2x^2.
$$
Using again Lagrange's interpolation formula we have
$$
P_2(x)=\frac{(x-x_0)(x-x_1)}{(x_2-x_0)(x_2-x_1)}y_2+
\frac{(x-x_0)(x-x_2)}{(x_1-x_0)(x_1-x_2)}y_1+
\frac{(x-x_1)(x-x_2)}{(x_0-x_1)(x_0-x_2)}y_0.
$$
Inserting this formula in the integral of Eq. ([eq:hhint](#eq:hhint)) we obtain
$$
\int_{-h}^{+h}f(x)dx=\frac{h}{3}\left(f_h + 4f_0 + f_{-h}\right)+O(h^5),
$$
which is Simpson's rule.
<!-- !split -->
## Simpson's rule
Note that the improved polynomial approximation gives a better error estimate,
$O(h^5)$ vs. $O(h^3)$.
But this is again the *local error approximation*.
Using Simpson's rule we can easily compute
the integral of Eq. ([eq:integraldef](#eq:integraldef)) to be
<!-- Equation labels as ordinary links -->
<div id="eq:simpson"></div>
$$
\begin{equation}
I=\int_a^bf(x) dx=\frac{h}{3}\left(f(a) + 4f(a+h) +2f(a+2h)+
\dots +4f(b-h)+ f_{b}\right),
\label{eq:simpson} \tag{6}
\end{equation}
$$
with a global error which goes like $O(h^4)$.
<!-- !split -->
## Mathematical expressions for the truncation error
More formal expressions for the local and global errors are for the local error
$$
\int_a^bf(x)dx -\frac{b-a}{6}\left[f(a)+4f((a+b)/2)+f(b)\right]=-\frac{h^5}{90}f^{(4)}(\xi),
$$
and for the global error
$$
\int_a^bf(x)dx -S_h(f)=-\frac{b-a}{180}h^4f^{(4)}(\xi).
$$
with $\xi\in[a,b]$ and $S_h$ the results obtained with Simpson's method.
<!-- !split -->
## Algorithm for Simpson's rule
The method
can easily be implemented numerically through the following simple algorithm
* Choose the number of mesh points $n$ (which must be even) and fix the step length $h=(b-a)/n$.
* Calculate $f(a)$ and $f(b)$.
* Perform a loop from $j=1$ to $n-1$ ($f(a)$ and $f(b)$ are known) and sum up the terms $4f(a+h) +2f(a+2h)+4f(a+3h)+\dots +4f(b-h)$. Each step in the loop corresponds to a given value $a+jh$. Odd values of $j$ give $4$ as factor while even values yield $2$ as factor.
* Multiply the final result by $\frac{h}{3}$; a short Python sketch of these steps is given below.
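A minimal Python sketch of these steps (the function name `SimpsonRule` is my own choice, not from the source), tested on the integral $4\int_0^1 dx/(1+x^2)=\pi$ used elsewhere in these notes:
```
from math import pi

def SimpsonRule(a, b, f, n):
    # n must be even; the weights are 1, 4, 2, 4, ..., 2, 4, 1 times h/3
    h = (b - a)/float(n)
    s = f(a) + f(b)
    for j in range(1, n):
        factor = 4 if j % 2 == 1 else 2
        s += factor*f(a + j*h)
    return h*s/3.0

def function(x):
    return 4.0/(1.0 + x*x)

print(SimpsonRule(0.0, 1.0, function, 100), pi)
```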
<!-- !split -->
## Summary for equal-step methods
In more general terms, what we have done here is to approximate a given function $f(x)$ with a polynomial
of a certain degree. One can show that
given $n+1$ distinct points $x_0,\dots, x_n\in[a,b]$ and $n+1$ values $y_0,\dots,y_n$ there exists a
unique polynomial $P_n(x)$ with the property
$$
P_n(x_j) = y_j\hspace{0.5cm} j=0,\dots,n
$$
<!-- !split -->
## Lagrange's polynomial
In the Lagrange representation the interpolating polynomial is given by
$$
P_n = \sum_{k=0}^nl_ky_k,
$$
with the Lagrange factors
$$
l_k(x) = \prod_{\begin{array}{c}i=0 \\ i\ne k\end{array}}^n\frac{x-x_i}{x_k-x_i}\hspace{0.2cm} k=0,\dots,n.
$$
<!-- !split -->
## Polynomial approximation
If we for example set $n=1$, we obtain
$$
P_1(x) = y_0\frac{x-x_1}{x_0-x_1}+y_1\frac{x-x_0}{x_1-x_0}=\frac{y_1-y_0}{x_1-x_0}x-\frac{y_1x_0+y_0x_1}{x_1-x_0},
$$
which we recognize as the equation for a straight line.
The polynomial interpolatory quadrature of order $n$ with equidistant quadrature points $x_k=a+kh$
and step $h=(b-a)/n$ is called the Newton-Cotes quadrature formula of order $n$.
## Gaussian Quadrature
The methods we have presented hitherto are tailored to problems where the
mesh points $x_i$ are equidistantly spaced, $x_i$ differing from $x_{i+1}$ by the step $h$.
The basic idea behind all integration methods is to approximate the integral
$$
I=\int_a^bf(x)dx \approx \sum_{i=1}^N\omega_if(x_i),
$$
where $\omega$ and $x$ are the weights and the chosen mesh points, respectively.
In our previous discussion, these mesh points were fixed at the beginning, by choosing
a given number of points $N$. The weights $\omega$ then resulted from the integration
method we applied. Simpson's rule, see Eq. ([eq:simpson](#eq:simpson)), would give
$$
\omega : \left\{h/3,4h/3,2h/3,4h/3,\dots,4h/3,h/3\right\},
$$
for the weights, while the trapezoidal rule resulted in
$$
\omega : \left\{h/2,h,h,\dots,h,h/2\right\}.
$$
## Gaussian Quadrature, main idea
In general, an integration formula which is based on a Taylor series using $N$ points
will integrate exactly a polynomial $P$ of degree $N-1$. That is, the $N$ weights
$\omega_n$ can be chosen to satisfy $N$ linear equations, see chapter 3 of Ref. [3].
A greater precision for a given amount of numerical work can be achieved
if we are willing to give up the requirement of equally spaced integration points.
In Gaussian quadrature (hereafter GQ), both the mesh points and the weights are to
be determined. The points will not be equally spaced.
The theory behind GQ is to obtain an arbitrary weight $\omega$ through the use of
so-called orthogonal polynomials. These polynomials are orthogonal in some
interval say e.g., [-1,1]. Our points $x_i$ are chosen in some optimal sense subject
only to the constraint that they should lie in this interval. Together with the weights
we have then $2N$ ($N$ the number of points) parameters at our disposal.
## Gaussian Quadrature
Even though the integrand is not smooth, we could render it smooth by extracting
from it the weight function of an orthogonal polynomial, i.e.,
we are rewriting
<!-- Equation labels as ordinary links -->
<div id="eq:generalint"></div>
$$
\begin{equation}
I= \int_a^b f(x)dx =\int_a^b W(x)g(x)dx \approx \sum_{i=1}^N\omega_ig(x_i),
\label{eq:generalint} \tag{7}
\end{equation}
$$
where $g$ is smooth and $W$ is the weight function, which is to be associated with a given
orthogonal polynomial. Note that with a given weight function we end up evaluating the integrand
for the function $g(x_i)$.
## Gaussian Quadrature, weight function
The weight function $W$ is non-negative in the integration interval
$x\in [a,b]$ and such that
for any $n \ge 0$ the integral $\int_a^b |x|^n W(x) dx$ is finite. The name
weight function arises from the fact that it may be used to give more emphasis
to one part of the interval than another.
A quadrature formula
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
\int_a^b W(x)f(x)dx \approx \sum_{i=1}^N\omega_if(x_i),
\label{_auto1} \tag{8}
\end{equation}
$$
with $N$ distinct quadrature points (mesh points) is called a Gaussian quadrature
formula if it integrates all polynomials $p\in P_{2N-1}$ exactly, that is
<!-- Equation labels as ordinary links -->
<div id="_auto2"></div>
$$
\begin{equation}
\int_a^bW(x)p(x)dx =\sum_{i=1}^N\omega_ip(x_i),
\label{_auto2} \tag{9}
\end{equation}
$$
It is assumed that $W(x)$ is continuous and positive and that the integral
$$
\int_a^bW(x)dx
$$
exists. Note that the replacement of $f\rightarrow Wg$ is normally a better approximation
due to the fact that we may isolate possible singularities of $W$ and its
derivatives at the endpoints of the interval.
## Gaussian Quadrature weights and integration points
The quadrature weights or just weights (not to be confused with the weight function)
are positive and the sequence of Gaussian quadrature formulae is convergent
if the sequence $Q_N$ of quadrature formulae
$$
Q_N(f)\rightarrow Q(f)=\int_a^bf(x)dx,
$$
in the limit $N\rightarrow \infty$.
## Gaussian Quadrature
Then we say that the sequence
$$
Q_N(f) = \sum_{i=1}^N\omega_i^{(N)}f(x_i^{(N)}),
$$
is convergent for all polynomials $p$, that is
$$
Q_N(p) = Q(p)
$$
if there exists a constant $C$ such that
$$
\sum_{i=1}^N|\omega_i^{(N)}| \le C,
$$
for all $N$ which are natural numbers.
## Error in Gaussian Quadrature
The error for the Gaussian quadrature formulae of order $N$ is given
by
$$
\int_a^bW(x)f(x)dx-\sum_{k=1}^Nw_kf(x_k)=\frac{f^{(2N)}(\xi)}{(2N)!}\int_a^bW(x)[q_{N}(x)]^2dx
$$
where $q_{N}$ is the chosen orthogonal polynomial and $\xi$ is a number in the interval $[a,b]$.
We have assumed that $f\in C^{2N}[a,b]$, viz. the space of all real or complex $2N$ times continuously
differentiable functions.
## Important polynomials in Gaussian Quadrature
In science there are several important orthogonal polynomials which arise
from the solution of differential equations. Well-known examples are the
Legendre, Hermite, Laguerre and Chebyshev polynomials. They have the following weight functions
<table border="1">
<thead>
<tr><th align="center"> Weight function </th> <th align="center"> Interval </th> <th align="center">Polynomial</th> </tr>
</thead>
<tbody>
<tr><td align="right"> $W(x)=1$ </td> <td align="right"> $x\in [-1,1]$ </td> <td align="right"> Legendre </td> </tr>
<tr><td align="right"> $W(x)=e^{-x^2}$ </td> <td align="right"> $-\infty \le x \le \infty$ </td> <td align="right"> Hermite </td> </tr>
<tr><td align="right"> $W(x)=x^{\alpha}e^{-x}$ </td> <td align="right"> $0 \le x \le \infty$ </td> <td align="right"> Laguerre </td> </tr>
<tr><td align="right"> $W(x)=1/(\sqrt{1-x^2})$ </td> <td align="right"> $-1 \le x \le 1$ </td> <td align="right"> Chebyshev </td> </tr>
</tbody>
</table>
The importance of the use of orthogonal polynomials in the evaluation
of integrals can be summarized as follows.
## Gaussian Quadrature, win-win situation
Methods based on Taylor series using $N$ points will integrate exactly a polynomial $P$ of degree $N-1$. If a function $f(x)$ can be approximated with a polynomial of degree $N-1$
$$
f(x)\approx P_{N-1}(x),
$$
with $N$ mesh points we should be able to integrate exactly the polynomial $P_{N-1}$.
Gaussian quadrature methods promise more than this. We can get a better polynomial approximation with order greater than $N$ to $f(x)$ and still get away with only $N$ mesh points. More precisely, we approximate
$$
f(x) \approx P_{2N-1}(x),
$$
and with only $N$ mesh points these methods promise that
$$
\int f(x)dx \approx \int P_{2N-1}(x)dx=\sum_{i=0}^{N-1} P_{2N-1}(x_i)\omega_i,
$$
## Gaussian Quadrature, determining mesh points and weights
The reason why we can represent a function $f(x)$ with a polynomial of degree
$2N-1$ is due to the fact that we have $2N$ equations, $N$ for the mesh points and $N$
for the weights.
*The mesh points are the zeros of the chosen orthogonal polynomial* of
order $N$, and the weights are determined from the inverse of a matrix.
An orthogonal polynomial of degree $N$ defined in an interval $[a,b]$
has precisely $N$ distinct zeros on the open interval $(a,b)$.
Before we detail how to obtain mesh points and weights with orthogonal
polynomials, let us revisit some features of orthogonal polynomials
by specializing to Legendre polynomials. In the text below, we reserve
hereafter the labelling
$L_N$ for a Legendre polynomial of order $N$, while $P_N$ is an arbitrary polynomial
of order $N$.
These polynomials form then the basis for the Gauss-Legendre method.
## Orthogonal polynomials, Legendre
The Legendre polynomials are the solutions of an important
differential equation in Science, namely
$$
C(1-x^2)P-m_l^2P+(1-x^2)\frac{d}{dx}\left((1-x^2)\frac{dP}{dx}\right)=0.
$$
Here $C$ is a constant. For $m_l=0$ we obtain the Legendre polynomials
as solutions, whereas $m_l \ne 0$ yields the so-called associated Legendre
polynomials. This differential equation arises in for example the solution
of the angular dependence of Schroedinger's
equation with spherically symmetric potentials such as
the Coulomb potential.
## Orthogonal polynomials, Legendre
The corresponding polynomials $P$ are, up to an overall normalization, given by Rodrigues' formula
$$
L_k(x)=\frac{1}{2^kk!}\frac{d^k}{dx^k}(x^2-1)^k \hspace{1cm} k=0,1,2,\dots,
$$
which defines the Legendre polynomials $L_k$.
The latter fulfil the orthogonality relation
<!-- Equation labels as ordinary links -->
<div id="eq:ortholeg"></div>
$$
\begin{equation}
\int_{-1}^1L_i(x)L_j(x)dx=\frac{2}{2i+1}\delta_{ij},
\label{eq:ortholeg} \tag{10}
\end{equation}
$$
and the recursion relation
<!-- Equation labels as ordinary links -->
<div id="eq:legrecur"></div>
$$
\begin{equation}
(j+1)L_{j+1}(x)+jL_{j-1}(x)-(2j+1)xL_j(x)=0.
\label{eq:legrecur} \tag{11}
\end{equation}
$$
## Orthogonal polynomials, Legendre
It is common to choose the normalization condition
$$
L_N(1)=1.
$$
With these equations we can determine a Legendre polynomial of arbitrary order
with input polynomials of order $N-1$ and $N-2$.
As an example, consider the determination of $L_0$, $L_1$ and $L_2$.
We have that
$$
L_0(x) = c,
$$
with $c$ a constant. Using the normalization equation $L_0(1)=1$
we get that
$$
L_0(x) = 1.
$$
## Orthogonal polynomials, Legendre
For $L_1(x)$ we have the general expression
$$
L_1(x) = a+bx,
$$
and using the orthogonality relation
$$
\int_{-1}^1L_0(x)L_1(x)dx=0,
$$
we obtain $a=0$ and with the condition $L_1(1)=1$, we obtain $b=1$, yielding
$$
L_1(x) = x.
$$
## Orthogonal polynomials, Legendre
We can proceed in a similar fashion in order to determine
the coefficients of $L_2$
$$
L_2(x) = a+bx+cx^2,
$$
using the orthogonality relations
$$
\int_{-1}^1L_0(x)L_2(x)dx=0,
$$
and
$$
\int_{-1}^1L_1(x)L_2(x)dx=0,
$$
and the condition
$L_2(1)=1$ we would get
<!-- Equation labels as ordinary links -->
<div id="eq:l2"></div>
$$
\begin{equation}
L_2(x) = \frac{1}{2}\left(3x^2-1\right).
\label{eq:l2} \tag{12}
\end{equation}
$$
## Orthogonal polynomials, Legendre
We note that we have three equations to determine the three coefficients
$a$, $b$ and $c$.
Alternatively, we could have
employed the recursion relation of Eq. ([eq:legrecur](#eq:legrecur)), resulting in
$$
2L_2(x)=3xL_1(x)-L_0,
$$
which leads to Eq. ([eq:l2](#eq:l2)).
## Orthogonal polynomials, Legendre
The orthogonality relation above is important in our discussion
on how to obtain the weights and mesh points. Suppose we have an arbitrary
polynomial $Q_{N-1}$ of order $N-1$ and a Legendre polynomial $L_N(x)$ of
order $N$. We could represent $Q_{N-1}$
by the Legendre polynomials through
<!-- Equation labels as ordinary links -->
<div id="eq:legexpansion"></div>
$$
\begin{equation}
Q_{N-1}(x)=\sum_{k=0}^{N-1}\alpha_kL_{k}(x),
\label{eq:legexpansion} \tag{13}
\end{equation}
$$
where $\alpha_k$'s are constants.
Using the orthogonality relation of Eq. ([eq:ortholeg](#eq:ortholeg)) we see that
<!-- Equation labels as ordinary links -->
<div id="eq:ortholeg2"></div>
$$
\begin{equation}
\int_{-1}^1L_N(x)Q_{N-1}(x)dx=\sum_{k=0}^{N-1} \int_{-1}^1L_N(x) \alpha_kL_{k}(x)dx=0.
\label{eq:ortholeg2} \tag{14}
\end{equation}
$$
We will use this result in our construction of mesh points and weights
in the next subsection.
## Orthogonal polynomials, Legendre
In summary, the first few Legendre polynomials are
$$
L_0(x) = 1,
$$

$$
L_1(x) = x,
$$

$$
L_2(x) = (3x^2-1)/2,
$$

$$
L_3(x) = (5x^3-3x)/2,
$$
and
$$
L_4(x) = (35x^4-30x^2+3)/8.
$$
## Orthogonal polynomials, simple code for Legendre polynomials
The following simple function implements the above recursion relation
of Eq. ([eq:legrecur](#eq:legrecur)) for computing Legendre polynomials of order $N$.
```
// This function computes the Legendre polynomial of degree N
double Legendre( int n, double x)
{
  double r, s, t;
  int m;
  r = 0; s = 1.;
  // Use the recursion relation to generate L_{m+1} from L_m and L_{m-1}
  for (m=0; m < n; m++ )
  {
    t = r; r = s;
    s = (2*m+1)*x*r - m*t;
    s /= (m+1);
  } // end of loop over m
  return s;
} // end of function Legendre
```
The variable $s$ represents $L_{j+1}(x)$, while $r$ holds
$L_j(x)$ and $t$ the value $L_{j-1}(x)$.
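The same recursion is easily expressed in Python (a small sketch of mine, not from the source); here it is checked against the closed form $L_2(x)=(3x^2-1)/2$ derived above:
```
def Legendre(n, x):
    # returns L_n(x) via the recursion (j+1) L_{j+1} = (2j+1) x L_j - j L_{j-1}
    r, s = 0.0, 1.0          # holds L_{m-1} and L_m, starting from L_0 = 1
    for m in range(n):
        r, s = s, ((2*m + 1)*x*s - m*r)/(m + 1)
    return s

x = 0.4
print(Legendre(2, x), (3*x*x - 1)/2)
```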
## Integration points and weights with orthogonal polynomials
To understand how the weights and the mesh points are generated, we define first
a polynomial of degree $2N-1$ (since we have $2N$ variables at hand, the mesh points
and weights for $N$ points). This polynomial can be represented through polynomial
division by
$$
P_{2N-1}(x)=L_N(x)P_{N-1}(x)+Q_{N-1}(x),
$$
where $P_{N-1}(x)$ and $Q_{N-1}(x)$ are some polynomials of degree $N-1$ or less.
The function $L_N(x)$ is a Legendre polynomial of order $N$.
Recall that we wanted to approximate an arbitrary function $f(x)$ with a
polynomial $P_{2N-1}$ in order to evaluate
$$
\int_{-1}^1f(x)dx\approx \int_{-1}^1P_{2N-1}(x)dx.
$$
## Integration points and weights with orthogonal polynomials
We can use Eq. ([eq:ortholeg2](#eq:ortholeg2))
to rewrite the above integral as
$$
\int_{-1}^1P_{2N-1}(x)dx=\int_{-1}^1(L_N(x)P_{N-1}(x)+Q_{N-1}(x))dx=\int_{-1}^1Q_{N-1}(x)dx,
$$
due to the orthogonality properties of the Legendre polynomials. We see that it suffices
to evaluate the integral over $\int_{-1}^1Q_{N-1}(x)dx$ in order to evaluate
$\int_{-1}^1P_{2N-1}(x)dx$. In addition, at the points $x_k$ where $L_N$ is zero, we have
$$
P_{2N-1}(x_k)=Q_{N-1}(x_k)\hspace{1cm} k=0,1,\dots, N-1,
$$
and we see that through these $N$ points we can fully define $Q_{N-1}(x)$ and thereby the
integral. Note that we have chosen to let the numbering of the points run from $0$ to $N-1$.
The reason for this choice is that we wish to have the same numbering as the order of a
polynomial of degree $N-1$. This numbering will be useful below when we introduce the matrix
elements which define the integration weights $w_i$.
## Integration points and weights with orthogonal polynomials
We then expand $Q_{N-1}(x)$ in terms of Legendre polynomials,
as done in Eq. ([eq:legexpansion](#eq:legexpansion)),
<!-- Equation labels as ordinary links -->
<div id="eq:lsum1"></div>
$$
\begin{equation}
Q_{N-1}(x)=\sum_{i=0}^{N-1}\alpha_iL_i(x).
\label{eq:lsum1} \tag{15}
\end{equation}
$$
Using the orthogonality property of the Legendre polynomials we have
$$
\int_{-1}^1Q_{N-1}(x)dx=\sum_{i=0}^{N-1}\alpha_i\int_{-1}^1L_0(x)L_i(x)dx=2\alpha_0,
$$
where we have just inserted $L_0(x)=1$!
## Integration points and weights with orthogonal polynomials
Instead of an integration problem we need now to define the coefficient $\alpha_0$.
Since we know the values of $Q_{N-1}$ at the zeros of $L_N$, we may rewrite
Eq. ([eq:lsum1](#eq:lsum1)) as
<!-- Equation labels as ordinary links -->
<div id="eq:lsum2"></div>
$$
\begin{equation}
Q_{N-1}(x_k)=\sum_{i=0}^{N-1}\alpha_iL_i(x_k)=\sum_{i=0}^{N-1}\alpha_iL_{ik} \hspace{1cm} k=0,1,\dots, N-1.
\label{eq:lsum2} \tag{16}
\end{equation}
$$
Since the Legendre polynomials are linearly independent of each other, none
of the columns in the matrix $L_{ik}$ are linear combinations of the others.
## Integration points and weights with orthogonal polynomials
This means that the matrix $L_{ik}$ has an inverse with the properties
$$
\hat{L}^{-1}\hat{L} = \hat{I}.
$$
Multiplying both sides of Eq. ([eq:lsum2](#eq:lsum2)) with the inverse of the matrix $L$ results in
<!-- Equation labels as ordinary links -->
<div id="eq:lsum3"></div>
$$
\begin{equation}
\sum_{i=0}^{N-1}(L^{-1})_{ki}Q_{N-1}(x_i)=\alpha_k.
\label{eq:lsum3} \tag{17}
\end{equation}
$$
## Integration points and weights with orthogonal polynomials
We can derive this result in an alternative way by defining the vectors
$$
\hat{x}_k=\left(\begin{array} {c} x_0\\
x_1\\
.\\
.\\
x_{N-1}\end{array}\right) \hspace{0.5cm}
\hat{\alpha}=\left(\begin{array} {c} \alpha_0\\
\alpha_1\\
.\\
.\\
\alpha_{N-1}\end{array}\right),
$$
and the matrix
$$
\hat{L}=\left(\begin{array} {cccc} L_0(x_0) & L_1(x_0) &\dots &L_{N-1}(x_0)\\
L_0(x_1) & L_1(x_1) &\dots &L_{N-1}(x_1)\\
\dots & \dots &\dots &\dots\\
L_0(x_{N-1}) & L_1(x_{N-1}) &\dots &L_{N-1}(x_{N-1})
\end{array}\right).
$$
## Integration points and weights with orthogonal polynomials
We have then
$$
Q_{N-1}(\hat{x}_k) = \hat{L}\hat{\alpha},
$$
yielding (if $\hat{L}$ has an inverse)
$$
\hat{L}^{-1}Q_{N-1}(\hat{x}_k) = \hat{\alpha},
$$
which is Eq. ([eq:lsum3](#eq:lsum3)).
## Integration points and weights with orthogonal polynomials
Using the above results and the fact that
$$
\int_{-1}^1P_{2N-1}(x)dx=\int_{-1}^1Q_{N-1}(x)dx,
$$
we get
$$
\int_{-1}^1P_{2N-1}(x)dx=\int_{-1}^1Q_{N-1}(x)dx=2\alpha_0=
2\sum_{i=0}^{N-1}(L^{-1})_{0i}P_{2N-1}(x_i).
$$
## Integration points and weights with orthogonal polynomials
If we identify the weights with $2(L^{-1})_{0i}$, where the points $x_i$ are
the zeros of $L_N$, we have an integration formula of the type
$$
\int_{-1}^1P_{2N-1}(x)dx=\sum_{i=0}^{N-1}\omega_iP_{2N-1}(x_i)
$$
and if our function $f(x)$ can be approximated by a polynomial $P$ of degree
$2N-1$, we have finally that
$$
\int_{-1}^1f(x)dx\approx \int_{-1}^1P_{2N-1}(x)dx=\sum_{i=0}^{N-1}\omega_iP_{2N-1}(x_i) .
$$
In summary, the mesh points $x_i$ are defined by the zeros of an orthogonal polynomial of degree $N$, that is
$L_N$, while the weights are
given by $2(L^{-1})_{0i}$.
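As a sanity check (this snippet is my own and not from the source), the weights $2(L^{-1})_{0i}$ can be constructed directly with NumPy from the zeros of $L_N$ and compared with the library routine `numpy.polynomial.legendre.leggauss`:
```
import numpy as np

def gauss_legendre_from_inverse(N):
    # mesh points: the N zeros of the Legendre polynomial L_N
    coeffs = np.zeros(N + 1); coeffs[N] = 1.0
    x = np.sort(np.polynomial.legendre.legroots(coeffs).real)
    # matrix with entries Lhat[k, i] = L_i(x_k), as in the text
    Lhat = np.zeros((N, N))
    for i in range(N):
        c = np.zeros(N); c[i] = 1.0
        Lhat[:, i] = np.polynomial.legendre.legval(x, c)
    # weights: 2 times the row of the inverse that picks out alpha_0
    w = 2.0*np.linalg.inv(Lhat)[0, :]
    return x, w

x, w = gauss_legendre_from_inverse(4)
xref, wref = np.polynomial.legendre.leggauss(4)
print(np.allclose(x, xref), np.allclose(w, wref))
```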
## Application to the case $N=2$
Let us apply the above formal results to the case $N=2$.
This means that we can approximate a function $f(x)$ with a
polynomial $P_3(x)$ of order $2N-1=3$.
The mesh points are the zeros of $L_2(x)=1/2(3x^2-1)$.
These points are $x_0=-1/\sqrt{3}$ and $x_1=1/\sqrt{3}$.
Specializing Eq. ([eq:lsum2](#eq:lsum2))
$$
Q_{N-1}(x_k)=\sum_{i=0}^{N-1}\alpha_iL_i(x_k) \hspace{1cm} k=0,1,\dots, N-1.
$$
to $N=2$ yields
$$
Q_1(x_0)=\alpha_0-\alpha_1\frac{1}{\sqrt{3}},
$$
and
$$
Q_1(x_1)=\alpha_0+\alpha_1\frac{1}{\sqrt{3}},
$$
since $L_0(x=\pm 1/\sqrt{3})=1$ and $L_1(x=\pm 1/\sqrt{3})=\pm 1/\sqrt{3}$.
## Application to the case $N=2$
The matrix $L_{ik}$ defined in Eq. ([eq:lsum2](#eq:lsum2)) is then
$$
\hat{L}=\left(\begin{array} {cc} 1 & -\frac{1}{\sqrt{3}}\\
1 & \frac{1}{\sqrt{3}}\end{array}\right),
$$
with an inverse given by
$$
\hat{L}^{-1}=\frac{\sqrt{3}}{2}\left(\begin{array} {cc} \frac{1}{\sqrt{3}} & \frac{1}{\sqrt{3}}\\
-1 & 1\end{array}\right).
$$
The weights are given by the matrix elements $2(L^{-1})_{0k}$. We thus obtain
$\omega_0=1$ and $\omega_1=1$.
## Application to the case $N=2$
Obviously, there is no problem in changing the numbering of the matrix elements $i,k=0,1,2,\dots,N-1$ to
$i,k=1,2,\dots,N$. We have chosen to start from zero, since we deal with polynomials of degree $N-1$.
Summarizing, for Legendre polynomials with $N=2$ we have
weights
$$
\omega : \left\{1,1\right\},
$$
and mesh points
$$
x : \left\{-\frac{1}{\sqrt{3}},\frac{1}{\sqrt{3}}\right\}.
$$
## Application to the case $N=2$
If we wish to integrate
$$
\int_{-1}^1f(x)dx,
$$
with $f(x)=x^2$, we approximate
$$
I=\int_{-1}^1x^2dx \approx \sum_{i=0}^{N-1}\omega_ix_i^2.
$$
## Application to the case $N=2$
The exact answer is $2/3$. Using $N=2$ with the above two weights
and mesh points we get
$$
I=\int_{-1}^1x^2dx =\sum_{i=0}^{1}\omega_ix_i^2=\frac{1}{3}+\frac{1}{3}=\frac{2}{3},
$$
the exact answer!
If we were to employ the trapezoidal rule we would get
$$
I=\int_{-1}^1x^2dx \approx \frac{b-a}{2}\left(f(a)+f(b)\right)=
\frac{1-(-1)}{2}\left((-1)^2+(1)^2\right)=2!
$$
With just two points we can calculate the integral of a second-order
polynomial exactly, since our method approximates the exact function with a higher-order
polynomial.
How many points do you need with the trapezoidal rule in order to achieve a
similar accuracy?
## General integration intervals for Gauss-Legendre
Note that the Gauss-Legendre method is not limited
to the interval $[-1,1]$, since we can always, through the change of variable
$$
t=\frac{b-a}{2}x+\frac{b+a}{2},
$$
rewrite the integral for an interval $[a,b]$ as
$$
\int_a^bf(t)dt=\frac{b-a}{2}\int_{-1}^1f\left(\frac{(b-a)x}{2}+\frac{b+a}{2}\right)dx.
$$
## Mapping integration points and weights
If we have an integral on the form
$$
\int_0^{\infty}f(t)dt,
$$
we can choose new mesh points and weights by using the mapping
$$
\tilde{x}_i=\tan\left\{\frac{\pi}{4}(1+x_i)\right\},
$$
and
$$
\tilde{\omega}_i= \frac{\pi}{4}\frac{\omega_i}{\cos^2\left(\frac{\pi}{4}(1+x_i)\right)},
$$
where $x_i$ and $\omega_i$ are the original mesh points and weights in the
interval $[-1,1]$, while $\tilde{x}_i$ and $\tilde{\omega}_i$ are the new
mesh points and weights for the interval $[0,\infty)$.
## Mapping integration points and weights
To see that this is correct, insert
the value $x_i=-1$ (the lower end of the interval $[-1,1]$)
into the expression for $\tilde{x}_i$. That gives $\tilde{x}_i=0$,
the lower end of the interval $[0,\infty)$. For
$x_i=1$, we obtain $\tilde{x}_i=\infty$. To check that the new
weights are correct, recall that the weights should correspond to the
derivative of the mesh points. Try to convince yourself that the
above expression fulfills this condition.
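A small sketch of this mapping (again my own illustration, not from the source), applied to $\int_0^{\infty}x^2e^{-x}dx=2$:
```
import numpy as np

def gauss_legendre_zero_to_inf(f, N):
    # map Gauss-Legendre nodes/weights from [-1, 1] to [0, infinity)
    x, w = np.polynomial.legendre.leggauss(N)
    xt = np.tan(0.25*np.pi*(1.0 + x))
    wt = 0.25*np.pi*w/np.cos(0.25*np.pi*(1.0 + x))**2
    return np.sum(wt*f(xt))

print(gauss_legendre_zero_to_inf(lambda x: x*x*np.exp(-x), 30))   # should approach 2
```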
## Other orthogonal polynomials, Laguerre polynomials
If we are able to rewrite our integral of Eq. ([eq:generalint](#eq:generalint)) with a
weight function $W(x)=x^{\alpha}e^{-x}$ with integration limits
$[0,\infty)$, we could then use the Laguerre polynomials.
The polynomials form then the basis for the Gauss-Laguerre method which can be applied
to integrals of the form
$$
I=\int_0^{\infty}f(x)dx =\int_0^{\infty}x^{\alpha}e^{-x}g(x)dx.
$$
## Other orthogonal polynomials, Laguerre polynomials
These polynomials arise from the solution of the differential
equation
$$
\left(\frac{d^2 }{dx^2}-\frac{d }{dx}+\frac{\lambda}{x}-\frac{l(l+1)}{x^2}\right){\cal L}(x)=0,
$$
where $l$ is an integer $l\ge 0$ and $\lambda$ a constant. This equation
arises for example from the solution of the radial Schrödinger equation with
a centrally symmetric potential such as the Coulomb potential.
## Other orthogonal polynomials, Laguerre polynomials
The first few polynomials are
$$
{\cal L}_0(x)=1,
$$

$$
{\cal L}_1(x)=1-x,
$$

$$
{\cal L}_2(x)=2-4x+x^2,
$$

$$
{\cal L}_3(x)=6-18x+9x^2-x^3,
$$
and
$$
{\cal L}_4(x)=x^4-16x^3+72x^2-96x+24.
$$
## Other orthogonal polynomials, Laguerre polynomials
They fulfil the orthogonality relation
$$
\int_{0}^{\infty}e^{-x}{\cal L}_n(x)^2dx=1,
$$
and the recursion relation
$$
(n+1){\cal L}_{n+1}(x)=(2n+1-x){\cal L}_{n}(x)-n{\cal L}_{n-1}(x).
$$
## Other orthogonal polynomials, Hermite polynomials
In a similar way, for an integral which goes like
$$
I=\int_{-\infty}^{\infty}f(x)dx =\int_{-\infty}^{\infty}e^{-x^2}g(x)dx.
$$
we could use the Hermite polynomials in order to extract weights and mesh points.
The Hermite polynomials are the solutions of the following differential
equation
<!-- Equation labels as ordinary links -->
<div id="eq:hermite"></div>
$$
\begin{equation}
\frac{d^2H(x)}{dx^2}-2x\frac{dH(x)}{dx}+
(\lambda-1)H(x)=0.
\label{eq:hermite} \tag{18}
\end{equation}
$$
## Other orthogonal polynomials, Hermite polynomials
A typical example is again the solution of Schrödinger's
equation, but this time with a harmonic oscillator potential.
The first few polynomials are
$$
H_0(x)=1,
$$

$$
H_1(x)=2x,
$$

$$
H_2(x)=4x^2-2,
$$

$$
H_3(x)=8x^3-12x,
$$
and
$$
H_4(x)=16x^4-48x^2+12.
$$
They fulfil the orthogonality relation
$$
\int_{-\infty}^{\infty}e^{-x^2}H_n(x)^2dx=2^nn!\sqrt{\pi},
$$
and the recursion relation
$$
H_{n+1}(x)=2xH_{n}(x)-2nH_{n-1}(x).
$$
<!-- !split -->
## Demonstration of Gaussian Quadrature
Let us here compare three methods for integrating, namely the trapezoidal rule,
Simpson's method and the Gauss-Legendre approach.
We choose two functions to integrate:
$$
\int_1^{100}\frac{\exp{(-x)}}{x}dx,
$$
and
$$
\int_{0}^{3}\frac{1}{2+x^2}dx.
$$
<!-- !split -->
## Demonstration of Gaussian Quadrature, simple program
A program example which uses the trapezoidal rule, Simpson's rule
and the Gauss-Legendre method is included here.
```
#include <iostream>
#include "lib.h"
using namespace std;
// Here we define various functions called by the main program
// this function defines the function to integrate
double int_function(double x);
// Main function begins here
int main()
{
  int n;
  double a, b;
  cout << "Read in the number of integration points" << endl;
  cin >> n;
  cout << "Read in integration limits" << endl;
  cin >> a >> b;
  // reserve space in memory for vectors containing the mesh points
  // weights and function values for the use of the gauss-legendre
  // method
  double *x = new double [n];
  double *w = new double [n];
  // set up the mesh points and weights
  gauss_legendre(a, b, x, w, n);
  // evaluate the integral with the Gauss-Legendre method
  // Note that we initialize the sum
  double int_gauss = 0.;
  for ( int i = 0; i < n; i++){
    int_gauss += w[i]*int_function(x[i]);
  }
  // final output
  cout << "Trapez-rule = " << trapezoidal_rule(a, b, n, int_function)
       << endl;
  cout << "Simpson's rule = " << simpson(a, b, n, int_function)
       << endl;
  cout << "Gaussian quad = " << int_gauss << endl;
  delete [] x;
  delete [] w;
  return 0;
}  // end of main program
// this function defines the function to integrate
double int_function(double x)
{
  double value = 4./(1.+x*x);
  return value;
}  // end of function to evaluate
```
<!-- !split -->
## Demonstration of Gaussian Quadrature
Note that in this program we can transfer the name of a given function to integrate.
In the table here we show the results for the first integral using various numbers of
mesh points.
<table border="1">
<thead>
<tr><th align="center">$N$ </th> <th align="center"> Trapez </th> <th align="center">Simpson </th> <th align="center">Gauss-Legendre</th> </tr>
</thead>
<tbody>
<tr><td align="right"> 10 </td> <td align="left"> 1.821020 </td> <td align="left"> 1.214025 </td> <td align="left"> 0.1460448 </td> </tr>
<tr><td align="right"> 20 </td> <td align="left"> 0.912678 </td> <td align="left"> 0.609897 </td> <td align="left"> 0.2178091 </td> </tr>
<tr><td align="right"> 40 </td> <td align="left"> 0.478456 </td> <td align="left"> 0.333714 </td> <td align="left"> 0.2193834 </td> </tr>
<tr><td align="right"> 100 </td> <td align="left"> 0.273724 </td> <td align="left"> 0.231290 </td> <td align="left"> 0.2193839 </td> </tr>
<tr><td align="right"> 1000 </td> <td align="left"> 0.219984 </td> <td align="left"> 0.219387 </td> <td align="left"> 0.2193839 </td> </tr>
</tbody>
</table>
We note here that, since the interval over which we integrate is rather large and the integrand
goes slowly to zero for large values of $x$, both the trapezoidal rule and Simpson's method
need rather many points in order to approach the Gauss-Legendre method.
This integrand demonstrates clearly the strength of the Gauss-Legendre method
(and other GQ methods as well), viz., few points
are needed in order to achieve a very high precision.
<!-- !split -->
## Demonstration of Gaussian Quadrature
The second table however shows that for smaller integration intervals, both the trapezoidal rule
and Simpson's method compare well with the results obtained with the Gauss-Legendre
approach.
<table border="1">
<thead>
<tr><th align="center">$N$ </th> <th align="center"> Trapez </th> <th align="center">Simpson </th> <th align="center">Gauss-Legendre</th> </tr>
</thead>
<tbody>
<tr><td align="right"> 10 </td> <td align="left"> 0.798861 </td> <td align="left"> 0.799231 </td> <td align="left"> 0.799233 </td> </tr>
<tr><td align="right"> 20 </td> <td align="left"> 0.799140 </td> <td align="left"> 0.799233 </td> <td align="left"> 0.799233 </td> </tr>
<tr><td align="right"> 40 </td> <td align="left"> 0.799209 </td> <td align="left"> 0.799233 </td> <td align="left"> 0.799233 </td> </tr>
<tr><td align="right"> 100 </td> <td align="left"> 0.799229 </td> <td align="left"> 0.799233 </td> <td align="left"> 0.799233 </td> </tr>
<tr><td align="right"> 1000 </td> <td align="left"> 0.799233 </td> <td align="left"> 0.799233 </td> <td align="left"> 0.799233 </td> </tr>
</tbody>
</table>
## Comparing methods and using symbolic Python
The following Python code allows you to run interactively either in a browser or using an IPython notebook. It compares the trapezoidal rule and Gaussian quadrature with the exact result from the symbolic Python package **SymPy** for the integral
$$
I = \int_0^{\infty} x^2 \exp{(-x)} dx = 2.
$$
For the trapezoidal rule the results will vary strongly depending on how the infinity limit is approximated. Try to run the code below for different finite approximations to $\infty$.
```
from math import exp
import numpy as np
from sympy import Symbol, integrate, exp, oo
# function for the trapezoidal rule
def TrapezoidalRule(a,b,f,n):
h = (b-a)/float(n)
s = 0
x = a
for i in range(1,n,1):
x = x+h
s = s+ f(x)
s = 0.5*(f(a)+f(b)) +s
return h*s
# function for the Gaussian quadrature with Laguerre polynomials
def GaussLaguerreRule(n):
s = 0
xgauleg, wgauleg = np.polynomial.laguerre.laggauss(n)
    for i in range(0,n,1):
s = s+ xgauleg[i]*xgauleg[i]*wgauleg[i]
return s
# function to compute
def function(x):
return x*x*exp(-x)
# Integration limits for the Trapezoidal rule
a = 0.0; b = 10000.0
# define x as a symbol to be used by sympy
x = Symbol('x')
# find result from sympy
exact = integrate(function(x), (x, a, oo))
# set up the arrays for plotting the relative error
n = np.zeros(40); Trapez = np.zeros(4); LagGauss = np.zeros(4);
# find the relative error as function of integration points
for i in range(1, 3, 1):
npts = 10**i
n[i] = npts
Trapez[i] = abs((TrapezoidalRule(a,b,function,npts)-exact)/exact)
LagGauss[i] = abs((GaussLaguerreRule(npts)-exact)/exact)
print "Integration points=", n[1], n[2]
print "Trapezoidal relative error=", Trapez[1], Trapez[2]
print "LagGuass relative error=", LagGauss[1], LagGauss[2]
```
## Treatment of Singular Integrals
So-called principal value (PV) integrals are often employed in physics,
from Green's functions for scattering to dispersion relations.
Dispersion relations are often related to measurable quantities
and provide important consistency checks in atomic, nuclear and
particle physics.
A PV integral is defined as
$$
I(x)={\cal P}\int_a^bdt\frac{f(t)}{t-x}=\lim_{\epsilon\rightarrow 0^+}
\left[\int_a^{x-\epsilon}dt\frac{f(t)}{t-x}+\int_{x+\epsilon}^bdt\frac{f(t)}{t-x}\right],
$$
and
arises in applications
of Cauchy's residue theorem when the pole $x$ lies
on the real axis within the interval of integration $[a,b]$. Here ${\cal P}$ stands for the principal value. *An important assumption is that the function $f(t)$ is continuous
on the interval of integration*.
## Treatment of Singular Integrals
In case $f(t)$ is a closed form expression or it has an analytic continuation
in the complex plane, it may be possible to obtain a closed-form expression
for the above integral.
However, the situation which we are often confronted with is that
$f(t)$ is only known at some points $t_i$ with corresponding
values $f(t_i)$. In order to obtain $I(x)$ we need to resort to a
numerical evaluation.
To evaluate such an integral, let us first rewrite it as
$$
{\cal P}\int_a^bdt\frac{f(t)}{t-x}=
\int_a^{x-\Delta}dt\frac{f(t)}{t-x}+\int_{x+\Delta}^bdt\frac{f(t)}{t-x}+
{\cal P}\int_{x-\Delta}^{x+\Delta}dt\frac{f(t)}{t-x},
$$
where we have isolated the principal value part in the last integral.
## Treatment of Singular Integrals, change of variables
Defining a new variable $u=t-x$, we can rewrite the principal value
integral as
<!-- Equation labels as ordinary links -->
<div id="eq:deltaint"></div>
$$
\begin{equation}
I_{\Delta}(x)={\cal P}\int_{-\Delta}^{+\Delta}du\frac{f(u+x)}{u}.
\label{eq:deltaint} \tag{19}
\end{equation}
$$
One possibility is to Taylor expand $f(u+x)$ around $u=0$, and compute
derivatives to a certain order as we did for the Trapezoidal rule or
Simpson's rule.
Since all terms with even powers of $u$ in the Taylor expansion disappear,
we have that
$$
I_{\Delta}(x)\approx \sum_{n=0}^{N_{max}}f^{(2n+1)}(x)
\frac{2\Delta^{2n+1}}{(2n+1)(2n+1)!}.
$$
## Treatment of Singular Integrals, higher-order derivatives
To evaluate higher-order derivatives may be both time
consuming and delicate from a numerical point of view, since
there is always the risk of losing precision when calculating
derivatives numerically. Unless we have an analytic expression
for $f(u+x)$ and can evaluate the derivatives in a closed form,
the above approach is not the preferred one.
Rather, we show here how to use the Gauss-Legendre method
to compute Eq. ([eq:deltaint](#eq:deltaint)).
Let us first introduce a new variable $s=u/\Delta$ and rewrite
Eq. ([eq:deltaint](#eq:deltaint)) as
<!-- Equation labels as ordinary links -->
<div id="eq:deltaint2"></div>
$$
\begin{equation}
I_{\Delta}(x)={\cal P}\int_{-1}^{+1}ds\frac{f(\Delta s+x)}{s}.
\label{eq:deltaint2} \tag{20}
\end{equation}
$$
## Treatment of Singular Integrals
The integration limits are now from $-1$ to $1$, as for the Legendre
polynomials.
The principal value in Eq. ([eq:deltaint2](#eq:deltaint2)) is however rather tricky
to evaluate numerically, mainly since computers have limited
precision. We will here use a subtraction trick often used
when dealing with singular integrals in numerical calculations.
We introduce first the calculus relation
$$
\int_{-1}^{+1} \frac{ds}{s} =0.
$$
It means that the curve $1/s$ has equal and opposite
areas on both sides of the singular point $s=0$.
## Treatment of Singular Integrals
If we then note that $f(x)$ is just a constant, we have also
$$
f(x)\int_{-1}^{+1} \frac{ds}{s}=\int_{-1}^{+1}f(x) \frac{ds}{s} =0.
$$
Subtracting this equation from
Eq. ([eq:deltaint2](#eq:deltaint2)) yields
<!-- Equation labels as ordinary links -->
<div id="eq:deltaint3"></div>
$$
\begin{equation}
I_{\Delta}(x)={\cal P}\int_{-1}^{+1}ds\frac{f(\Delta s+x)}{s}=\int_{-1}^{+1}ds\frac{f(\Delta s+x)-f(x)}{s},
\label{eq:deltaint3} \tag{21}
\end{equation}
$$
and the integrand is no longer singular since we have that
$\lim_{s \rightarrow 0} (f(\Delta s+x) -f(x))=0$ and for the particular case
$s=0$ the integrand
is now finite.
## Treatment of Singular Integrals
Eq. ([eq:deltaint3](#eq:deltaint3)) is now rewritten using the Gauss-Legendre
method resulting in
<!-- Equation labels as ordinary links -->
<div id="eq:deltaint4"></div>
$$
\begin{equation}
\int_{-1}^{+1}ds\frac{f(\Delta s+x)-f(x)}{s}=\sum_{i=1}^{N}\omega_i\frac{f(\Delta s_i+x)-f(x)}{s_i},
\label{eq:deltaint4} \tag{22}
\end{equation}
$$
where $s_i$ are the mesh points ($N$ in total) and $\omega_i$ are the weights.
In the selection of mesh points for a PV integral, it is important
to use an even number of points, since an odd number of mesh
points always picks $s_i=0$ as one of the mesh points. The sum in
Eq. ([eq:deltaint4](#eq:deltaint4)) will then diverge.
## Treatment of Singular Integrals
Let us apply this method to the integral
<!-- Equation labels as ordinary links -->
<div id="eq:deltaint5"></div>
$$
\begin{equation}
I(x)={\cal P}\int_{-1}^{+1}dt\frac{e^t}{t}.
\label{eq:deltaint5} \tag{23}
\end{equation}
$$
The integrand diverges at $x=t=0$. We
rewrite it using Eq. ([eq:deltaint3](#eq:deltaint3)) as
<!-- Equation labels as ordinary links -->
<div id="eq:deltaint6"></div>
$$
\begin{equation}
{\cal P}\int_{-1}^{+1}dt\frac{e^t}{t}=\int_{-1}^{+1}dt\frac{e^t-1}{t},
\label{eq:deltaint6} \tag{24}
\end{equation}
$$
since $e^x=e^0=1$. With Eq. ([eq:deltaint4](#eq:deltaint4)) we have then
<!-- Equation labels as ordinary links -->
<div id="eq:deltaint7"></div>
$$
\begin{equation}
\int_{-1}^{+1}dt\frac{e^t-1}{t}\approx \sum_{i=1}^{N}\omega_i\frac{e^{t_i}-1}{t_i}.
\label{eq:deltaint7} \tag{25}
\end{equation}
$$
## Treatment of Singular Integrals
The exact result is $2.11450175075\dots$. With just two mesh points we recall
from the previous subsection that $\omega_1=\omega_2=1$ and that the mesh points are the zeros of $L_2(x)$, namely $x_1=-1/\sqrt{3}$ and
$x_2=1/\sqrt{3}$. Setting $N=2$ and inserting these values in the last
equation gives
$$
I_2(x=0)=\sqrt{3}\left(e^{1/\sqrt{3}}-e^{-1/\sqrt{3}}\right)=2.1129772845.
$$
With six mesh points we even reproduce the exact result to the tenth digit
$$
I_6(x=0)=2.11450175075!
$$
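A short Python check of these numbers (my own sketch), using NumPy's `leggauss` for the mesh points and weights in Eq. ([eq:deltaint7](#eq:deltaint7)):
```
import numpy as np

def pv_exp_over_t(N):
    # Gauss-Legendre evaluation of the subtracted integrand (e^t - 1)/t
    t, w = np.polynomial.legendre.leggauss(N)
    return np.sum(w*(np.exp(t) - 1.0)/t)

print(pv_exp_over_t(2))   # about 2.1129772845
print(pv_exp_over_t(6))   # about 2.11450175075
```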
## Treatment of Singular Integrals
We can repeat the above subtraction trick for more complicated
integrands.
First we modify the integration limits to $\pm \infty$ and use the fact
that
$$
\int_{-\infty}^{\infty} \frac{dk}{k-k_0}=
\int_{-\infty}^{0} \frac{dk}{k-k_0}+
\int_{0}^{\infty} \frac{dk}{k-k_0} =0.
$$
A change of variable $u=-k$ in the integral with limits from $-\infty$ to $0$ gives
$$
\int_{-\infty}^{\infty} \frac{dk}{k-k_0}=
\int_{\infty}^{0} \frac{-du}{-u-k_0}+
\int_{0}^{\infty} \frac{dk}{k-k_0}= \int_{0}^{\infty} \frac{dk}{-k-k_0}+
\int_{0}^{\infty} \frac{dk}{k-k_0}=0.
$$
## Treatment of Singular Integrals
It means that the curve $1/(k-k_0)$ has equal and opposite
areas on both sides of the singular point $k_0$. If we break
the integral into one over positive $k$ and one over
negative $k$, a change of variable $k\rightarrow -k$
allows us to rewrite the last equation as
$$
\int_{0}^{\infty} \frac{dk}{k^2-k_0^2} =0.
$$
## Treatment of Singular Integrals
We can use this to express a principal value integral
as
<!-- Equation labels as ordinary links -->
<div id="eq:trick_pintegral"></div>
$$
\begin{equation}
{\cal P}\int_{0}^{\infty} \frac{f(k)dk}{k^2-k_0^2} =
\int_{0}^{\infty} \frac{(f(k)-f(k_0))dk}{k^2-k_0^2},
\label{eq:trick_pintegral} \tag{26}
\end{equation}
$$
where the right-hand side is no longer singular at
$k=k_0$; there it is proportional to the derivative $df/dk$,
and can be evaluated numerically as any other integral.
Such a trick is often used when evaluating integral equations.
## Example of a multidimensional integral
Here we show an example of a multidimensional integral which appears in quantum mechanical calculations.
The ansatz for the wave function for two electrons is given by the product of two
$1s$ wave functions as
$$
\Psi({\bf r}_1,{\bf r}_2) = \exp{(-\alpha (r_1+r_2))}.
$$
The integral we need to solve is the quantum mechanical expectation value of the correlation
energy between two electrons, namely
$$
I = \int_{-\infty}^{\infty} d{\bf r}_1d{\bf r}_2 \exp{(-2\alpha (r_1+r_2))}\frac{1}{|{\bf r}_1-{\bf r}_2|}.
$$
The integral has the exact solution $5\pi^2/16^2 \approx 0.19277$ (for the value $\alpha=2$ used in the code below).
## Parts of code and brute force Gauss-Legendre quadrature
If we use Gaussian quadrature with Legendre polynomials (without rewriting the integral), we have
```
double *x = new double [N];
double *w = new double [N];
// set up the mesh points and weights
GaussLegendrePoints(a, b, x, w, N);
// evaluate the integral with the Gauss-Legendre method
// Note that we initialize the sum
double int_gauss = 0.;
// six nested loops, one for each Cartesian coordinate
for (int i=0;i<N;i++){
  for (int j = 0;j<N;j++){
    for (int k = 0;k<N;k++){
      for (int l = 0;l<N;l++){
        for (int m = 0;m<N;m++){
          for (int n = 0;n<N;n++){
            int_gauss+=w[i]*w[j]*w[k]*w[l]*w[m]*w[n]
              *int_function(x[i],x[j],x[k],x[l],x[m],x[n]);
          }}}}}
}
```
## The function to integrate, code example
```
// this function defines the function to integrate
double int_function(double x1, double y1, double z1, double x2, double y2, double z2)
{
  double alpha = 2.;
  // evaluate the different terms of the exponential
  double exp1 = -2*alpha*sqrt(x1*x1+y1*y1+z1*z1);
  double exp2 = -2*alpha*sqrt(x2*x2+y2*y2+z2*z2);
  double deno = sqrt(pow((x1-x2),2)+pow((y1-y2),2)+pow((z1-z2),2));
  if (deno < 1.0e-6) return 0.0;   // avoid dividing by zero when the two points coincide
  return exp(exp1+exp2)/deno;
}  // end of function to evaluate
```
## Laguerre polynomials
Using Legendre polynomials for the Gaussian quadrature is not very efficient. There are several reasons for this:
* You can easily end up in situations where the integrand diverges
* The limits $\pm \infty$ have to be approximated with a finite number
It is very useful here to change to spherical coordinates
$$
d{\bf r}_1d{\bf r}_2 = r_1^2dr_1 r_2^2dr_2 d\cos(\theta_1)d\cos(\theta_2)d\phi_1d\phi_2,
$$
and
$$
\frac{1}{r_{12}}= \frac{1}{\sqrt{r_1^2+r_2^2-2r_1r_2\cos(\beta)}}
$$
with
$$
\cos(\beta) = \cos(\theta_1)\cos(\theta_2)+\sin(\theta_1)\sin(\theta_2)\cos(\phi_1-\phi_2).
$$
## Laguerre polynomials, the new integrand
This means that our integral becomes
$$
I=\int_0^{\infty} r_1^2dr_1 \int_0^{\infty}r_2^2dr_2 \int_0^{\pi}d\cos(\theta_1)\int_0^{\pi}d\cos(\theta_2)\int_0^{2\pi}d\phi_1\int_0^{2\pi}d\phi_2 \frac{\exp{(-2\alpha (r_1+r_2))}}{r_{12}},
$$
where we have defined
$$
\frac{1}{r_{12}}= \frac{1}{\sqrt{r_1^2+r_2^2-2r_1r_2\cos(\beta)}}
$$
with
$$
\cos(\beta) = \cos(\theta_1)\cos(\theta_2)+\sin(\theta_1)\sin(\theta_2)\cos(\phi_1-\phi_2).
$$
## Laguerre polynomials, new integration rule: Gauss-Laguerre
Our integral is now given by
$$
I=\int_0^{\infty} r_1^2dr_1 \int_0^{\infty}r_2^2dr_2 \int_0^{\pi}d\cos(\theta_1)\int_0^{\pi}d\cos(\theta_2)\int_0^{2\pi}d\phi_1\int_0^{2\pi}d\phi_2 \frac{\exp{(-2\alpha (r_1+r_2))}}{r_{12}}.
$$
For the angles we need to perform the integrations over $\theta_i\in [0,\pi]$ and $\phi_i \in [0,2\pi]$. However, for the radial part we can now either use
* Gauss-Legendre with an appropriate mapping or
* Gauss-Laguerre, taking proper care of the integrands involving the $r_i^2 \exp{(-2\alpha r_i)}$ terms; a Python sketch of this approach is shown below.
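The following is a compact Python sketch of the Gauss-Laguerre approach (my own illustration; the variable names and the modest numbers of integration points are arbitrary choices). The radial variables are rescaled as $x=2\alpha r$ so that plain `laggauss` (weight $e^{-x}$) can be used, and the $\cos\theta_i$ integrations run over $[-1,1]$. With increasing numbers of points the result should approach the exact value $0.19277$ quoted above.
```
import numpy as np

alpha = 2.0
Nrad, Nang = 10, 8                                   # small numbers: the six-fold loop is O(N^6)

xl, wl = np.polynomial.laguerre.laggauss(Nrad)       # weight e^{-x} on [0, infinity)
xc, wc = np.polynomial.legendre.leggauss(Nang)       # cos(theta) in [-1, 1]
xp, wp = np.polynomial.legendre.leggauss(Nang)
phi  = np.pi*(xp + 1.0)                              # map [-1, 1] to [0, 2*pi]
wphi = np.pi*wp

r = xl/(2.0*alpha)                                   # change of variable x = 2*alpha*r
total = 0.0
for i1 in range(Nrad):
    for i2 in range(Nrad):
        for j1 in range(Nang):
            for j2 in range(Nang):
                sin1 = np.sqrt(1.0 - xc[j1]**2)
                sin2 = np.sqrt(1.0 - xc[j2]**2)
                for k1 in range(Nang):
                    for k2 in range(Nang):
                        cosb = xc[j1]*xc[j2] + sin1*sin2*np.cos(phi[k1] - phi[k2])
                        r12sq = r[i1]**2 + r[i2]**2 - 2.0*r[i1]*r[i2]*cosb
                        if r12sq < 1.0e-10:          # guard against the integrable singularity
                            continue
                        w = wl[i1]*wl[i2]*wc[j1]*wc[j2]*wphi[k1]*wphi[k2]
                        total += w*xl[i1]**2*xl[i2]**2/np.sqrt(r12sq)
total /= (2.0*alpha)**6                              # Jacobian factors from the radial rescaling
print(total, 5.0*np.pi**2/16.0**2)
```
The six-fold loop is kept for clarity; vectorizing it with NumPy broadcasting would be considerably faster.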
## Results with $N=20$ with Gauss-Legendre
<table border="1">
<thead>
<tr><th align="center">$r_{\mathrm{max}}$</th> <th align="center"> Integral </th> <th align="center"> Error </th> </tr>
</thead>
<tbody>
<tr><td align="center"> 1.00 </td> <td align="center"> 0.161419805 </td> <td align="center"> 0.0313459063 </td> </tr>
<tr><td align="center"> 1.50 </td> <td align="center"> 0.180468967 </td> <td align="center"> 0.012296744 </td> </tr>
<tr><td align="center"> 2.00 </td> <td align="center"> 0.177065182 </td> <td align="center"> 0.0157005292 </td> </tr>
<tr><td align="center"> 2.50 </td> <td align="center"> 0.167970694 </td> <td align="center"> 0.0247950165 </td> </tr>
<tr><td align="center"> 3.00 </td> <td align="center"> 0.156139391 </td> <td align="center"> 0.0366263199 </td> </tr>
</tbody>
</table>
## Results for $r_{\mathrm{max}}=2$ with Gauss-Legendre
<table border="1">
<thead>
<tr><th align="center">$N$</th> <th align="center"> Integral </th> <th align="center"> Error </th> </tr>
</thead>
<tbody>
<tr><td align="center"> 10 </td> <td align="center"> 0.129834248 </td> <td align="center"> 0.0629314631 </td> </tr>
<tr><td align="center"> 16 </td> <td align="center"> 0.167860437 </td> <td align="center"> 0.0249052742 </td> </tr>
<tr><td align="center"> 20 </td> <td align="center"> 0.177065182 </td> <td align="center"> 0.0157005292 </td> </tr>
<tr><td align="center"> 26 </td> <td align="center"> 0.183543237 </td> <td align="center"> 0.00922247353 </td> </tr>
<tr><td align="center"> 30 </td> <td align="center"> 0.185795624 </td> <td align="center"> 0.00697008738 </td> </tr>
</tbody>
</table>
## Results with Gauss-Laguerre
<table border="1">
<thead>
<tr><th align="center">$N$</th> <th align="center"> Integral </th> <th align="center"> Error </th> </tr>
</thead>
<tbody>
<tr><td align="center"> 10 </td> <td align="center"> 0.186457345 </td> <td align="center"> 0.00630836601 </td> </tr>
<tr><td align="center"> 16 </td> <td align="center"> 0.190113364 </td> <td align="center"> 0.00265234708 </td> </tr>
<tr><td align="center"> 20 </td> <td align="center"> 0.19108178 </td> <td align="center"> 0.00168393093 </td> </tr>
<tr><td align="center"> 26 </td> <td align="center"> 0.191831828 </td> <td align="center"> 0.000933882594 </td> </tr>
<tr><td align="center"> 30 </td> <td align="center"> 0.192113712 </td> <td align="center"> 0.000651999339 </td> </tr>
</tbody>
</table>
The code that was used to generate these results can be found under the [program link](https://github.com/CompPhysics/ComputationalPhysics/blob/master/doc/Programs/LecturePrograms/programs/NumericalIntegration/cpp/program2.cpp).
# Section 1
## Learning PyTorch for the first time
In this section, we learn how to use pretrained models for imagenet classification
```
import torch
torch.cuda.is_available()
from torchvision import models
dir(models)
alexnet = models.AlexNet()
alexnet
resnet = models.resnet101(pretrained=False)
resnet
resnet.load_state_dict(torch.load("/Users/ramanshsharma/pytorch_practice/resnet.pth"))
from torchvision import transforms
# sort of like putting layers of transformations together
preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(254),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
from PIL import Image
img = Image.open("dog.jpeg")
# img.show()
img_p = preprocess(img)
img_p.shape
img_r = torch.unsqueeze(img_p, 0)
img_r.shape
# 1 dimension added at beginning to refer to the number of examples in the data
resnet.eval() # important for the model to predict properly
# eval basically tells the model that some layers such as dropout
# and batchnorm are not to be used while evaluating
out = resnet(img_r)
out.shape
with open('data_labels.txt') as f:
labels = [line.strip() for line in f.readlines()]
len(labels)
values, idx = torch.max(out, 1)
idx = idx[0]
idx
confidence = torch.nn.functional.softmax(out, dim=1)[0] * 100
whatami = labels[idx]
me = confidence[idx]
print(f"This is {whatami} with {torch.round(me)}% confidence.")
confidence[idx].item()
_, idx = torch.sort(out, descending=True)
idx.shape
[f"{labels[i]}, {confidence[i]}%" for i in idx[0][:5]]
torch.save(resnet.state_dict(), "/Users/ramanshsharma/pytorch_practice/resnet.pth")
```
# Section 2
## Learning how to use GANs
In this section we learn how to use Generative Adversarial Networks to produce realistic-looking images.
```
# IT IS ASSUMED I HAVE NO IDEA OF THE CODE BELOW
from torch import nn
import torch
class ResNetBlock(nn.Module): # <1>
def __init__(self, dim):
super().__init__()
self.conv_block = self.build_conv_block(dim)
def build_conv_block(self, dim):
conv_block = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(dim, dim, kernel_size=3, padding=0, bias=True),
nn.InstanceNorm2d(dim),
nn.ReLU(True),
nn.ReflectionPad2d(1),
nn.Conv2d(dim, dim, kernel_size=3, padding=0, bias=True),
nn.InstanceNorm2d(dim)
)
return conv_block
def forward(self, x):
out = x + self.conv_block(x) # <2>
return out
class ResNetGenerator(nn.Module):
def __init__(self, input_nc=3, output_nc=3, ngf=64, n_blocks=9): # <3>
assert n_blocks >= 0
super().__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
model = [
nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=True),
nn.InstanceNorm2d(ngf),
nn.ReLU(True)
]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2 ** i
model.extend([
nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=True),
nn.InstanceNorm2d(ngf * mult * 2),
nn.ReLU(True)
])
mult = 2 * n_downsampling
for i in range(n_blocks):
model.append(ResNetBlock(ngf * mult))
for i in range(n_downsampling):
mult = 2 ** (n_downsampling - i)
model.extend([
nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=True),
nn.InstanceNorm2d(int(ngf * mult / 2)),
nn.ReLU(True)
])
model.append(nn.ReflectionPad2d(3))
model.append(nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0))
model.append(nn.Tanh())
self.model = nn.Sequential(*model)
def forward(self, input): # <3>
return self.model(input)
net = ResNetGenerator()
net
param_path = "/Users/ramanshsharma/pytorch_practice/horse2zebra_0.4.0.pth"
loaded_param = torch.load(param_path)
net.load_state_dict(loaded_param)
net.eval()
preprocess = transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor()
])
img = Image.open('/Users/ramanshsharma/pytorch_practice/dog.jpeg')
img.show()
img_t = preprocess(img)
img_r = torch.unsqueeze(img_t, 0) # adding that extra dimension for
# number of examples
img_r.shape
out = net(img_r)
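# the generator ends in nn.Tanh(), so rescale its [-1, 1] output to [0, 1] before converting back to a PIL image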
out_t = (out.data.squeeze() + 1.0) / 2.0
out_img = transforms.ToPILImage()(out_t)
out_img.show()
# NOTE: when a dog is fed to the model, it remains unchanged
# hinting to the fact that the model does not overfit
```
# Section 3
## Learning natural language
In this section, we apply ideas from natural language processing by building an image captioning system.
This section could only have been done through the terminal, and I am not about to download such a big repository for a single run on the terminal.
# Section 4
## Torch Hub
```
from torch import hub
# hub does not require GitHub repos to be cloned
# yet allows importing models if the repo has a hubconf.py file
resnet18 = hub.load('pytorch/vision:master', # name of account, repo, branch
'resnet18', # name of entry point function
pretrained=True # params that entry point funcs can have
)
resnet18.state_dict()
# model weights
# these libraries were identified by pytorch errors produced in
# their absence
import torchaudio
import soundfile as sf
sample_model = hub.load('facebookresearch/CPC_audio',
'CPC_audio',
pretrained=False
)
# YAY I DID THIS MYSELF
```
```
import bs4 as bs, urllib.request, pandas as pd, numpy as np
```
Test parsing syntax
```
keyword='vaddiszno'
time1='2018-07-01'
time2='2018-08-01'
baseurl=u'https://szekelyhon.ro/kereses?op=search&src_words='
url=baseurl+keyword+'&src_time1='+time1+'&src_time2='+time2
```
Parse past X years
```
start='2002-12-01'
end='2018-11-01'
dates=[]
datelist = pd.date_range(start=pd.to_datetime(start), end=pd.to_datetime(end), freq='M').tolist()
for date in datelist:
dates.append(str(date)[:10])
def extractor(time1,time2):
print('Parsing...',time1,'-',time2)
url=baseurl+keyword+'&src_time1='+time1+'&src_time2='+time2
html = urllib.request.urlopen(url).read()
soup = bs.BeautifulSoup(html,'lxml')
return soup.findAll("div", {"class": "catinner"})
divs=[]
for i in range(len(dates)-1):
time1=dates[i]
time2=dates[i+1]
divs.append(extractor(time1,time2))
def date_hu_en(i):
date=i[6:-4]
if date=='augusztus': m='08'
elif date=='december': m='12'
elif date=='február': m='02'
elif date=='január': m='01'
elif date=='július': m='07'
elif date=='június': m='06'
elif date=='május': m='05'
elif date=='március': m='03'
elif date=='november': m='11'
elif date==u'október': m='10'
elif date==u'szeptember': m='09'
elif date==u'április': m='04'
else: return date
return i[:4]+'-'+m+'-'+i[-3:-1]
```
Relevant = whether or not the article is bear-related
Deaths = number of deaths
Severity = severity: 0 - other type of news, 1 - sighting, 2 - attack on an animal, 3 - attack on a human
```
def text_processor(title,content):
relevant=0
severity=0
deaths=0
tamadas=[u'támad',u'sebes']
for i in tamadas:
if i in title+content:
relevant=1
severity=2
tamadas=[u'halál',u'áldozat',u'ölt ']
for i in tamadas:
if i in title+content:
relevant=1
severity=3
tamadas=[u'vaddiszn']
for i in tamadas:
if i in title.replace(',',' ').replace('.',' ').lower():
relevant=1
return relevant,severity,deaths
hirek=[]
for divgroup in divs:
for div in divgroup:
idiv=div.find('div')
content=div.find('p')
date=idiv.text[idiv.text.find('201'):idiv.text.find(',')]
title=div.find('h2').text
if content==None:
sdiv=str(div)[::-1]
content=sdiv[:sdiv.find('>a/<')].replace('\r','').replace('\t','').replace('\n','')[::-1][:-6]
else: content=content.text
relevant,severity,deaths=text_processor(title,content)
hirek.append({'date':date_hu_en(date),
'hudate':date,
'title':title,
'content':content,
'link':div.findAll('a')[-1]['href'],
'relevant':relevant,
'severity':severity,
'deaths':deaths
})
```
All wild boar news items
```
len(hirek)
df=pd.DataFrame().from_dict(hirek)
df['date']=pd.to_datetime(df['date'])
df=df.sort_values('date').reset_index(drop=True)
```
Save to Excel for manual curation.
```
df.columns
df=df[[ 'date', 'hudate', 'link', 'title','content','relevant', 'severity','deaths']]
df.to_excel('szekelyhon_vaddiszno.xlsx')
df.to_excel('szekelyhon_vaddiszno_curated.xlsx')
```
Manually check and curate the `curated` file. Don't touch the other one.
Load curated
```
dc=pd.read_excel('szekelyhon_vaddiszno_curated.xlsx')
dg=dc[dc['relevant']==1]
dg
```
Further fixes from full text
```
hirek_full=[]
for divgroup in divs:
for div in divgroup:
hirek_full.append(div)
df=pd.DataFrame().from_dict(hirek)
df['date']=pd.to_datetime(df['date'])
df=df.sort_values('date').reset_index()
index_converter=list(df['index'])
hirek[index_converter[252]]
hirek[index_converter[1987]]
```
# Introduction
This IPython notebook explains a basic workflow for matching two tables using py_entitymatching. The goal is to come up with a workflow to match books from Goodreads and Amazon. Specifically, we want to maximize F1. The datasets contain information about the books.
First, we need to import py_entitymatching package and other libraries as follows:
```
import py_entitymatching as em
import pandas as pd
import os
import sys
from timeit import default_timer as timer
# Display the versions
print('python version: ' + sys.version )
print('pandas version: ' + pd.__version__ )
print('magellan version: ' + em.__version__ )
```
Matching two tables typically consists of the following three steps:
1. Reading the input tables
2. Blocking the input tables to get a candidate set
3. Matching the tuple pairs in the candidate set
## Read input tables
```
source1 = 'source1_cleaned.csv'
source2 = 'source2_cleaned.csv'
# Read the data
A = em.read_csv_metadata(source1)
B = em.read_csv_metadata(source2)
# Set the metadata
em.set_key(A, 'ID')
em.set_key(B, 'ID')
print('Number of tuples in A: ' + str(len(A)))
print('Number of tuples in B: ' + str(len(B)))
print('Number of tuples in A X B (i.e the cartesian product): ' + str(len(A)*len(B)))
A.head(2)
B.head(2)
# Display the keys of the input tables
em.get_key(A), em.get_key(B)
```
Here we will proceed without downsampling the datasets and use the entire dataset.
## Block tables to get candidate set
Before we do the matching, we would like to remove the obviously non-matching tuple pairs from the input tables. This would reduce the number of tuple pairs considered for matching.
### Rule Based Blocker
We first get the tokenizers and the similarity functions and then get the attribute correspondence for the two tables.
We then define the following rules:
1. For a tuple pair, if the Levenshtein similarity for the **Name** attribute is less than 0.275, block them.
2. For a tuple pair, if the Jaccard similarity for the **Author** attribute is less than 0.5, block them.
#### Define the rules
```
# Rule-Based blocker
rb0 = em.RuleBasedBlocker()
block_t = em.get_tokenizers_for_blocking()
block_s = em.get_sim_funs_for_blocking()
block_c = em.get_attr_corres(A, B)
atypes_A = em.get_attr_types(A)
atypes_B = em.get_attr_types(B)
block_f = em.get_features(A, B, atypes_A, atypes_B, block_c, block_t, block_s)
# add rule for book names : block tuples if Levenshtein Similarity is below 0.275
rb0.add_rule(['Name_Name_lev_sim(ltuple, rtuple) < 0.275'], block_f)
# add rule for authors : block tuples if Jaccard Similarity is below 0.5 in spaces delimited tokens
rb0.add_rule(['Author_Author_jac_dlm_dc0_dlm_dc0(ltuple, rtuple) < 0.5'], block_f)
```
#### Perform Blocking
```
start = timer()
C0 = rb0.block_tables(A, B,
l_output_attrs=['ID', 'Name', 'Author', 'Publisher', 'Publishing_Date', 'Format', 'Pages', 'Rating'],
r_output_attrs=['ID', 'Name', 'Author', 'Publisher', 'Publishing_Date', 'Format', 'Pages', 'Rating'],
show_progress=False)
end = timer()
print("Time taken : " + str(end - start))
print(len(C0))
C0.head(2)
```
#### Overlap Blocker
We now apply the overlap blocker to the candidate set obtained in the previous step. Since the entity we are dealing with is books, there are quite a few stopwords present in the book names, such as "The", "Of", "And" etc. Hence we will remove these stopwords by setting the <i>rem_stop_words</i> to _True_ and then perform overlap blocking with the size set to 1.
We apply overlap blocking to the following attributes:
1. Book Names
2. Book Authors
```
start = timer()
# Overlap blocker
overlapBlocker = em.OverlapBlocker()
overlapBlocker.stop_words.append('of')
C1 = overlapBlocker.block_candset(C0, 'Name', 'Name', word_level=True, overlap_size=1, allow_missing=True, show_progress=False, rem_stop_words=True)
C1 = overlapBlocker.block_candset(C1, 'Author', 'Author', word_level=True, overlap_size=1, allow_missing=True, show_progress=False, rem_stop_words=True)
end = timer()
print("Time taken : " + str(end - start))
print(len(C1))
C1.head(2)
```
## Debug blocker output
The number of tuple pairs considered for matching is reduced to 1092 (from 10164387), but we would want to make sure that the blocker did not drop any potential matches.
```
# Debug blocker output
dbg = em.debug_blocker(C1, A, B, output_size=200)
dbg.head(3)
```
We can see here that we already have some potential matches. Since the number of candidate tuple pairs has dropped to just 1092 from 10,164,387, we decided to stop debugging the blocking step and proceed with training a matcher.
```
# Saving the tuples which survived the blocking step
C1.to_csv("TuplesAfterBlocking.csv", encoding='utf-8', index=False)
```
## Labeling the candidate set
We labeled the tuples from the previous step as a match or not: 1 indicates a match and 0 indicates a non-match. We did not use the <i>label_table</i> function.
We sample 500 tuple pairs for labeling, from the 1092 obtained after blocking.
```
# Sample 500 tuples for labeling
S = em.sample_table(C1, 500)
# Save this for labeling
S.to_csv('TuplesForLabeling.csv', encoding='utf-8', index=False)
```
Labeling 1092 tuples took roughly 45 minutes.
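For comparison, the GUI-based labeling helper that we chose not to use would be invoked roughly as shown below; the `'label'` column name is our own choice rather than anything required by the package.
```
# Opens an interactive window for labeling each candidate pair as 0/1;
# we labeled in a spreadsheet instead, so the call is left commented out.
# G = em.label_table(S, 'label')
```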
```
# Load the golden data
S = em.read_csv_metadata('TuplesForLabeling_cleaned.csv', key='_id', ltable=A, rtable=B,
fk_ltable='ltable_ID', fk_rtable='rtable_ID')
```
Samples from the golden data; The last column **match** indicates the labels we've added.
```
S.head(3)
```
## Splitting the labeled data into development and evaluation set
In this step, we split the labeled data into two sets: development (I) and evaluation (J). Specifically, the development set is used to come up with the best learning-based matcher and the evaluation set is used to evaluate the selected matcher on unseen data.
```
# Split S into development set (I) and evaluation set (J)
IJ = em.split_train_test(S, train_proportion=0.7, random_state=42)
I = IJ['train']
J = IJ['test']
len(I), len(J)
```
### Save Set I and Set J
```
I.to_csv("SetI.csv", encoding='utf-8', index=False)
J.to_csv("SetJ.csv", encoding='utf-8', index=False)
```
## Selecting the best learning-based matcher
Selecting the best learning-based matcher typically involves the following steps:
1. Creating a set of learning-based matchers
2. Creating features
3. Converting the development set into feature vectors
4. Selecting the best learning-based matcher using k-fold cross validation
### Creating a set of learning-based matchers
Here, we tuned the hyperparameters a bit so that they are more relevant to our scenario.
```
# Create a set of ML-matchers
dt = em.DTMatcher(name='DecisionTree', random_state=0, criterion='gini', class_weight='balanced')
svm = em.SVMMatcher(name='SVM', kernel='linear', random_state=0)
rf = em.RFMatcher(name='RF', n_estimators=50, criterion='gini', class_weight='balanced', random_state=0)
lg = em.LogRegMatcher(name='LogReg', penalty='l2', class_weight='balanced', random_state=0)
ln = em.LinRegMatcher(name='LinReg')
nb = em.NBMatcher(name='NaiveBayes')
```
### Creating features
Here we use the automatically generated features
```
# Generate features
feature_table = em.get_features_for_matching(A, B, validate_inferred_attr_types=False)
# List the names of the features generated
feature_table['feature_name']
```
### Dropping Features
We remove a few features from the generated set of features. The reasoning is as follows:
Consider the **Publisher** attribute. While labeling the true matches, we marked a tuple pair as a true match even if the publishers did not match. The same book is usually sold in different countries under different publishers and hence though the publishers might differ, the book still refers to the same real world object. Hence we do not consider the **Publisher** attribute as a feature, as they might differ for a match. The same reasoning is extended to **Pages** and **Rating** attributes as well.
```
# Drop publishing date, rating related features
feature_table = feature_table.drop([0,1,2,3,17,18,19,20,21,22,23,24,25,30,34,35,36,37,38])
```
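As an aside, dropping by positional index ties the notebook to the exact ordering of the auto-generated features. A hedged alternative is to filter the feature table by attribute name; the pattern below reflects the reasoning above (plus the ID key features) and is an assumption rather than the notebook's exact selection.
```
# Keep only features whose names do not involve the attributes we chose to exclude
exclude_pattern = 'ID|Publisher|Publishing_Date|Pages|Rating'
feature_table_by_name = feature_table[
    ~feature_table['feature_name'].str.contains(exclude_pattern)
].reset_index(drop=True)
feature_table_by_name['feature_name']
```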
### Converting the development set to feature vectors
```
# Convert the I into a set of feature vectors using F
H = em.extract_feature_vecs(I,
feature_table=feature_table,
attrs_after='match',
show_progress=False)
```
#### Check for missing values
```
H.isnull().sum()
```
#### Impute missing values with mean
```
H = em.impute_table(H,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
strategy='mean')
```
### Selecting the best matcher using cross-validation
Now, we select the best matcher using k-fold cross-validation. We use five-fold cross-validation and the F1 score (which balances precision and recall) to select the best matcher.
```
# Select the best ML matcher using CV
start = timer()
result = em.select_matcher([dt, rf, svm, ln, lg, nb], table=H,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
k=5,
target_attr='match', metric_to_select_matcher='f1', random_state=42)
end = timer()
print(end-start)
print(result['cv_stats'])
print(result['drill_down_cv_stats']['f1'])
```
#### As seen here, the random forest classifier has a precision of over 90% (96.66%) and has a recall of 61.23%. It also has the highest F1 score. Hence we do not debug further and proceed to use the random forest classifier on the test set.
## Evaluating the matching output
Evaluating the matching outputs for the evaluation set typically involves the following four steps:
1. Converting the evaluation set to feature vectors
2. Training matcher using the feature vectors extracted from the development set
3. Predicting the evaluation set using the trained matcher
4. Evaluating the predicted matches
### Converting the evaluation set to feature vectors
As before, we convert to the feature vectors (using the feature table and the evaluation set)
```
# Testing
# Convert J into a set of feature vectors using F
L = em.extract_feature_vecs(J, feature_table=feature_table,
attrs_after='match', show_progress=False)
```
### Impute the missing values in the test set
```
# Impute missing values
L = em.impute_table(L,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
strategy='mean')
```
### Training the selected matcher
Now, we train the matcher using all of the feature vectors from the development set. Here, we use random forest as the selected matcher.
```
# Train using feature vectors from I
rf.fit(table=H,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
target_attr='match')
```
### Predicting the matches
Next, we predict the matches for the evaluation set (using the feature vectors extracted from it).
```
# Predict on L
predictions = rf.predict(table=L, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
append=True, target_attr='predicted', inplace=False)
```
### Evaluating the predictions
Finally, we evaluate the accuracy of predicted outputs
```
# Evaluate the predictions
eval_result = em.eval_matches(predictions, 'match', 'predicted')
em.print_eval_summary(eval_result)
```
### Evaluation on all learning methods
Here we see how the other 5 learning methods perform on the test set.
**Decision Tree**
```
# Train using feature vectors from I
dt.fit(table=H,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
target_attr='match')
# Predict on L
predictions = dt.predict(table=L, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
append=True, target_attr='predicted', inplace=False)
# Evaluate the predictions
eval_result = em.eval_matches(predictions, 'match', 'predicted')
em.print_eval_summary(eval_result)
```
**Support Vector Machines**
```
# Train using feature vectors from I
svm.fit(table=H,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
target_attr='match')
# Predict on L
predictions = svm.predict(table=L, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
append=True, target_attr='predicted', inplace=False)
# Evaluate the predictions
eval_result = em.eval_matches(predictions, 'match', 'predicted')
em.print_eval_summary(eval_result)
```
**Logistic Regression**
```
# Train using feature vectors from I
lg.fit(table=H,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
target_attr='match')
# Predict on L
predictions = lg.predict(table=L, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
append=True, target_attr='predicted', inplace=False)
# Evaluate the predictions
eval_result = em.eval_matches(predictions, 'match', 'predicted')
em.print_eval_summary(eval_result)
```
**Linear Regression**
```
# Train using feature vectors from I
ln.fit(table=H,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
target_attr='match')
# Predict on L
predictions = ln.predict(table=L, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
append=True, target_attr='predicted', inplace=False)
# Evaluate the predictions
eval_result = em.eval_matches(predictions, 'match', 'predicted')
em.print_eval_summary(eval_result)
```
**Naive Bayes**
```
# Train using feature vectors from I
nb.fit(table=H,
exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
target_attr='match')
# Predict on L
predictions = nb.predict(table=L, exclude_attrs=['_id', 'ltable_ID', 'rtable_ID', 'match'],
append=True, target_attr='predicted', inplace=False)
# Evaluate the predictions
eval_result = em.eval_matches(predictions, 'match', 'predicted')
em.print_eval_summary(eval_result)
```
# Part 2 - Branch Versioning with Parcel Fabric
### Components of a Parcel Fabric Feature Service
A Parcel Fabric feature service consists of at least 5 endpoints (SOEs)
- Mapping (.../MapServer)
- Feature Access (.../FeatureServer)
- Version Management (.../VersionManagementServer)
- Validation (.../ValidationServer)
- Parcel Fabric (../ParcelFabricServer)
Most Parcel Fabric editing operations will make use of the Feature Access, Version Management and Parcel Fabric endpoints.
### Parcel editing workflow
At a high level, a typical parcel editing workflow will look like:
1. Create a branch version to isolate edits from the default version. (Version Management)
2. Start an edit session (Version Management)
3. Create a new parcel Record feature (Feature Access)
4. Edit one or more parcels (merge, divide, copy lines, etc.) (Parcel Fabric)
5. Reconcile the current branch version with the default version. (Version Management)
6. Post changes from the current version to the default version. (Version Management)
7. Stop the edit session. (Version Management)
8. Delete the version. (Version Management)
Based on this example, it is obvious that understanding versioned, multi-user editing is critical. The following notebooks will demonstrate how to list versions, create versions, start and stop edit sessions and reconcile and post edits.
```
from arcgis import GIS
from arcgis.features import _version
base_server_url = "https://myserver.domain.com/web_adaptor/rest/services/WashingtonCountyLSA/"
gis = GIS("https://myserver.domain.com/web_adaptor/", "username", "pass.word", verify_cert=False)
```
### Access branch versions via Version Management Server
```
from arcgis.features._version import VersionManager
version_management_server_url = f"{base_server_url}/VersionManagementServer"
vms = VersionManager(version_management_server_url, gis)
vms.properties
```
#### Get all versions
```
versions = vms.all
versions
```
#### Create a version
```
new_version_name = "fabric_editor_1"
vms.create(new_version_name)
```
### Access the Version Management Server through the Parcel Fabric `FeatureLayerCollection` object
The `versions` property in a parcel fabric `FeatureLayerCollection` (FLC) creates a `VersionManager` object to create, update and use versions.
The `FeatureServer` endpoint is used to create a `FeatureLayerCollection`.
```
from arcgis.features.layer import FeatureLayerCollection
parcel_fabric_feature_server_url = f"{base_server_url}/FeatureServer"
parcel_fabric_flc = FeatureLayerCollection(parcel_fabric_feature_server_url, gis)
# print the version names from the FLC's versions property:
vms_from_flc = parcel_fabric_flc.versions
[print(v.properties.versionName) for v in vms_from_flc.all]
```
### Branch Versioning Edit Sessions
A branch versioning edit session is the act of obtaining shared and exclusive locks on the feature class to prevent corruption in the branch version. Calling `version.startReading` will set a shared lock on the version, which prevents another session from obtaining an exclusive lock. Other sessions can still access the version as read-only. Calling `version.startEditing` will set the exclusive lock, which prevents other sessions from obtaining write access to the version.
Keeping track of where one is within the edit sessions is made simple with a built in context manager.
```
from arcgis.features import _parcel
parcel_fabric_manager_url = f"{base_server_url}/ParcelFabricServer"
# start a 'read' session to acquire a shared lock and
# get a branch version by its name
with vms.get("admin.generate_fabric_links", "read") as version:
parcel_fabric_manager = _parcel.ParcelFabricManager(
parcel_fabric_manager_url,
gis,
version,
parcel_fabric_flc)
# do parcel fabric or other feature service editing within the version
# i.e. parcel_fabric_manager.copy_lines_to_parcel_type(...)
parcel_fabric_manager.properties
```
#### Reconcile, Post and Delete the version
When editing is complete, the new features can be posted from the new branch version to the default version. In this workflow, Reconcile must occur first. Once posted, the version can optionally be deleted.
```
version = vms.get(f"admin.{new_version_name}")
# version.reconcile()
# version.post
version.delete()
```
### API Ref Documentation
- [ArcGIS Python API - Version Manager](https://developers.arcgis.com/python/api-reference/arcgis.features.managers.html#versionmanager)
- [ArcGIS Python API - Parcel Fabric Manager](https://developers.arcgis.com/python/api-reference/arcgis.features.managers.html#parcelfabricmanager)
- [ArcGIS REST API - VersionManagementServer](https://developers.arcgis.com/rest/services-reference/enterprise/version-management-service.htm)
- [ArcGIS REST API - ParcelFabricServer](https://developers.arcgis.com/rest/services-reference/enterprise/overview-of-parcel-fabric-sevices.htm)
- [ArcGIS Pro - Branch Versioning Scenarios](https://pro.arcgis.com/en/pro-app/latest/help/data/geodatabases/overview/branch-version-scenarios.htm)
#### Imputing Values
You now have some experience working with missing values, and imputing based on common methods. Now, it is your turn to put your skills to work in being able to predict for rows even when they have NaN values.
First, let's read in the necessary libraries, and get the results together from what you achieved in the previous attempt.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import ImputingValues as t
import seaborn as sns
%matplotlib inline
df = pd.read_csv('./survey_results_public.csv')
df.head()
#Only use quant variables and drop any rows with missing values
num_vars = df[['Salary', 'CareerSatisfaction', 'HoursPerWeek', 'JobSatisfaction', 'StackOverflowSatisfaction']]
df_dropna = num_vars.dropna(axis=0)
#Split into explanatory and response variables
X = df_dropna[['CareerSatisfaction', 'HoursPerWeek', 'JobSatisfaction', 'StackOverflowSatisfaction']]
y = df_dropna['Salary']
#Split into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .30, random_state=42)
lm_model = LinearRegression(normalize=True) # Instantiate
lm_model.fit(X_train, y_train) #Fit
#Predict and score the model
y_test_preds = lm_model.predict(X_test)
"The r-squared score for your model was {} on {} values.".format(r2_score(y_test, y_test_preds), len(y_test))
```
#### Question 1
**1.** As you may remember from an earlier analysis, there are many more salaries to predict than the values shown from the above code. One of the ways we can start to make predictions on these values is by imputing items into the **X** matrix instead of dropping them.
Using the **num_vars** dataframe, drop the rows with missing values of the response (Salary) - store this new dataframe in **drop_sal_df**. Then impute all the other missing values with the mean of the column - store this in **fill_df**.
```
drop_sal_df = #Drop the rows with missing salaries
# test look
drop_sal_df.head()
#Check that you dropped all the rows that have salary missing
t.check_sal_dropped(drop_sal_df)
fill_df = #Fill all missing values with the mean of the column.
# test look
fill_df.head()
#Check your salary dropped, mean imputed dataframe matches the solution
t.check_fill_df(fill_df)
```
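For reference, one possible way to fill in the blanks above is sketched below; treat it as a sketch rather than the official solution, and use the `t.check_*` helpers above to verify your own version.
```
# One possible solution sketch (variable names follow the prompt above)
drop_sal_df = num_vars.dropna(subset=['Salary'], axis=0)  # drop rows with missing Salary
fill_mean = lambda col: col.fillna(col.mean())            # impute a column with its mean
fill_df = drop_sal_df.apply(fill_mean, axis=0)            # apply to every remaining column
```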
#### Question 2
**2.** Using **fill_df**, predict Salary based on all of the other quantitative variables in the dataset. You can use the template above to assist in fitting your model:
* Split the data into explanatory and response variables
* Split the data into train and test (using seed of 42 and test_size of .30 as above)
* Instantiate your linear model using normalized data
* Fit your model on the training data
* Predict using the test data
* Compute a score for your model fit on all the data, and show how many rows you predicted for
Use the tests to assure you completed the steps correctly.
```
#Split into explanatory and response variables
#Split into train and test
#Predict and score the model
#Rsquared and y_test
rsquared_score = #r2_score
length_y_test = #num in y_test
"The r-squared score for your model was {} on {} values.".format(rsquared_score, length_y_test)
# Pass your r2_score, length of y_test to the below to check against the solution
t.r2_y_test_check(rsquared_score, length_y_test)
```
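Again for reference, one possible solution sketch for Question 2, mirroring the template at the top of the notebook but fit on the mean-imputed **fill_df**:
```
#Split into explanatory and response variables
X = fill_df[['CareerSatisfaction', 'HoursPerWeek', 'JobSatisfaction', 'StackOverflowSatisfaction']]
y = fill_df['Salary']

#Split into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.30, random_state=42)

#Fit the model and obtain predictions
lm_model = LinearRegression(normalize=True)
lm_model.fit(X_train, y_train)
y_test_preds = lm_model.predict(X_test)

#Rsquared and y_test
rsquared_score = r2_score(y_test, y_test_preds)
length_y_test = len(y_test)
```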
This model still isn't great. Let's see if we can't improve it by using some of the other columns in the dataset.
# 1. Compositional Data
Compiled by [Morgan Williams](mailto:morgan.williams@csiro.au) for C3DIS 2018
Geochemical data is compositional in nature, meaning that values are relative and subject to closure (i.e. they sum to 100%). This leads to spurious correlation (e.g. for two variable compositions $X = C(x_1, x_2)$, $x_2 = 1-x_1$ by definition), and the restriction of possible values to $\mathbb{R}\in[0,1]$.
With regards to the use of regular statistical measures on compositional data, John Aitchison notes "... we would not expect that excellent tool of the wide open spaces (or $\mathbb{R}^d$) of North America, namely the barbecue, necessarily to be an appropriate concept for cooking in the confined space (or $\mathbb{S}^d$) of a low-cost housing flatlet in Hong Kong".
Here we illustrate the features of compositional varaibles, and the steps necessary to incorporate them into a standard statistical framework.
## The Lognormal Distribution and Log Transformations
Compositional random variables are log-normally distributed: the logarithm of compositional random variables is normally distributed.
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
import os, sys
import numpy as np
import scipy.stats as stats
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
sys.path.insert(0, './src')
from compositions import *
from plotting_tools import *
np.random.seed(int(''.join([str(ord(c)) for c in 'C3DIS']))//3)
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
mu = 3
x = np.linspace(0, 100, 1000)
explogxs = []
expxs = []
for sigma in np.linspace(0.3, 1.3, 8):
# As sigma -> >1 - peak density appears below np.exp
# As sigma -> 0 - peak density trends towards np.exp(mu)
logdist = stats.lognorm(sigma, scale=np.exp(mu))
normdist = stats.norm(loc=mu, scale=sigma)
exp_logx = np.exp(np.mean(np.log(logdist.rvs(100000))))
explogxs.append(exp_logx)
expx = np.mean(np.log(logdist.rvs(100000)))
expxs.append(expx)
ax[0].plot(x, logdist.pdf(x), 'k-', lw=1/sigma, alpha=0.4,)
ax[0].axvline(exp_logx)
ax[1].plot(x, logdist.pdf(x), 'k-', lw=1/sigma, alpha=0.4,)
ax[1].axvline(exp_logx)
ax[2].plot(x, normdist.pdf(x), 'k-', lw=1/sigma, alpha=0.4)
ax[2].axvline(mu)
ax[0].annotate(f'E($ln(x)$) = {np.mean(explogxs):2.2f}', xy=(0.5, 0.9), ha='left', xycoords=ax[0].transAxes)
ax[0].annotate(f'E($x$) = {np.mean(expxs):2.2f}', xy=(0.5, 0.8), ha='left', xycoords=ax[0].transAxes)
ax[0].annotate('Linewidth $\propto$ 1/$\sigma$', xy=(0.5, 0.6), ha='left', xycoords=ax[0].transAxes)
ax[1].set_xscale('log')
ax[2].set_xlim((0, np.log(x.max())))
for a, t in zip(ax, ['lognormal', 'lognormal on log axes', 'normal']):
a.set_yticks([])
a.set_title(t)
```
Log-transformations of relative compositional components allow the use of standard statistical techniques, with values previously constrained to $\mathbb{R}\in[0,1]$ now spread over $\mathbb{R}\in[-\infty,\infty] \equiv \mathbb{R}$. Of the log transforms, the additive log ratio (ALR) transform is one of the most commonly used. It uses one component as a divisor, taking logarithms of the relative component abundances:
$alr(x) = [ln(\frac{x_1}{x_d}),ln(\frac{x_2}{x_d}), ..., ln(\frac{x_{d-1}}{x_d})]$
Where the log-transformed variable $Y$ is composed of logarithms of the components of $X$ relative to a chosen divisor component $x_d$. The inverse of this transform is:
$invalr(y) = C([e^{y_1}, e^{y_2}, ..., e^{y_{d-1}}, 1])$
Where C denotes the closure operator (i.e. maintaining the 100% sum constraint). Below, the invertibility of this transformation is demonstrated, provided that closure is considered. The covariance structure may be sensitive to the specific divisor chosen.
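Since `additive_log_ratio` and `inverse_additive_log_ratio` in the cell below come from this repository's local `compositions` module (not shown here), a minimal standalone sketch of the transform pair defined above is given first; it is illustrative only and not necessarily the module's actual implementation.
```
# Minimal ALR / inverse-ALR sketch following the definitions above
import numpy as np

def close_(X):
    """Closure: rescale each composition (row) to sum to 1."""
    X = np.atleast_2d(X).astype(float)
    return X / X.sum(axis=1, keepdims=True)

def alr(X, ind=-1):
    """Additive log-ratio transform using component `ind` as the divisor."""
    X = np.atleast_2d(X).astype(float)
    Y = np.log(X / X[:, ind][:, None])
    return np.delete(Y, ind % X.shape[1], axis=1)

def inverse_alr(Y, ind=-1):
    """Invert the ALR transform: exponentiate, reinsert the divisor as 1, close."""
    Y = np.atleast_2d(Y)
    Z = np.insert(np.exp(Y), ind % (Y.shape[1] + 1), 1.0, axis=1)
    return close_(Z)

Xc = close_([0.15, 0.3, 0.5, 0.051])
assert np.allclose(inverse_alr(alr(Xc, ind=0), ind=0), Xc)
```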
```
sample_xs = [np.array([0.15, 0.3, 0.5, 0.051]).T,
np.array([[0.2, 0.25, 0.5, 0.05],
[0.15, 0.3, 0.5, 0.051]])]
for ind in [0, 1, 2, 3, -1]:
for xs in sample_xs:
Y = additive_log_ratio(xs, ind=ind)
X = inverse_additive_log_ratio(Y, ind=ind)
assert np.isclose(close(xs), X).all()
```
Another common log-transformation is the centred log transform (CLR), which instead uses the geometric mean as the divisor:
$clr(x) = [ln(\frac{x_1}{g(x)}),ln(\frac{x_2}{g(x)}), ..., ln(\frac{x_{D}}{g(x)})] = ln(\frac{x}{g(x)})$ where $g(x)$ is the geometric mean $[x_1, x_2, ..., x_D]^{1/D}$
Notably, this transformation uses a single unbiased measure as the divisor - and hence will return a specific covariance structure.
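A matching standalone sketch of the CLR transform defined above (the biplot code later uses `CLRTransform` from the local `compositions` module instead):
```
# Minimal CLR sketch: log of each part relative to the row-wise geometric mean g(x)
import numpy as np

def clr(X):
    X = np.atleast_2d(X).astype(float)
    g = np.exp(np.log(X).mean(axis=1, keepdims=True))  # geometric mean of each composition
    return np.log(X / g)

# CLR-transformed parts sum to zero for each composition
print(clr([0.2, 0.3, 0.5]).sum(axis=1))  # ~[0.]
```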
## Visualising Compositional Data
#### Ternary Diagrams
Ternary diagrams are a standard method of visualising compositional data in geology. Typically limited to three components, they can be extended to tetrahedra, albeit with limited utility. While a valid and familiar visualisation tool, they incorporate distortion due to the projection. One method to reduce this is centering - where data are perturbed to project the range across the diagram more effectively (e.g. Martin-Fernandez et al., 1999 and von Eynatten et al., 2002). The example below is from von Eynatten et al. (2002; Figure 2), and illustrates how variability can be better equalized using a centering operation.

### Biplots
Biplots utilise principal component analysis to maximise the information presented in a diagram. They illustrate the pattern of relative variation of a multivariate dataset through projection onto a plane defined by two principal components - both samples and variables can be represented on biplots. For a bit more on principal component analysis - see [the notebook focusing on dimensional reduction](04_Dimensional_Reduction.ipynb).
```
n = 100
dat = pd.DataFrame({'Cu': 0.1 * (np.random.randn(n) * 1 + 5),
'Mn': np.linspace(0.001, 2, n),
'Fe': 0.98 ** np.linspace(0.001, 2, n),
'Ni': np.exp(np.linspace(0.001, 1, n))
})
dat = dat.divide(dat.sum(axis=1), axis=0) # Closure: each sample (row) sums to 1
clr = CLRTransform()
tdat = clr.transform(dat)
pca = PCA(n_components = tdat.shape[1])
pca = pca.fit(tdat)
# Transformed data component axes in PC space
xvector = pca.components_[0]
yvector = pca.components_[1]
# PCA-transformed datapoints
xs = pca.transform(tdat)[:, 0]
ys = pca.transform(tdat)[:, 1]
fig, ax = plt.subplots(1, figsize=(4,4))
for i in range(len(xs)):
plt.scatter(xs[i], ys[i], color='orange', s=20)
xmin, xmax = np.nan, np.nan
ymin, ymax = np.nan, np.nan
for i in range(len(xvector)):
x1, x2 = ax.get_xlim()
y1, y2 = ax.get_ylim()
xmin, xmax = np.nanmin([xmin, x1]), np.nanmax([xmax, x2])
ymin, ymax = np.nanmin([ymin, y1]), np.nanmax([ymax, y2])
diag = np.sqrt((x2-x1)**2 + (y2-y1)**2)
scale = 10**-1 * diag
ax.plot([0, xvector[i]],
[0, yvector[i]],
color='k',
linewidth=scale,
marker='D',
markersize=3,
)
ha = ['right', 'left'][xvector[i]>0]
va = ['bottom', 'top'][yvector[i]>0]
ax.text(xvector[i]*1.2, #*max(xs)
yvector[i]*1.2, #*max(ys)
list(dat.columns)[i],
color='k',
ha=ha,
va=va)
ax.set_xlabel('PCA 1')
ax.set_xlim((xmin-0.1, xmax+0.1))
ax.set_ylim((ymin-0.1, ymax+0.1))
ax.set_ylabel('PCA 2');
```
## Compositional Distance
Due to the constraints of a closed space, euclidean distances in the simplex are not accurate measures of similarity. Instead, distance metrics should be taken from log-transformed data. This is particularly important for clustering, but also has implications for regression (e.g. incorporating least-squares or similar metrics). The figure below highlights this, with three compositional random distributions in the ternary space, for which each is equally separated and has equal variance in log-transformed space. Figure taken from Martin-Fernandez, et al. (1999; Figure 1).
The distance between two compositions $\Delta_s(x, X)$ is given by (Aitchison et al., 2000):
$\Delta_s(x, X) = \bigg[ \sum_{i=1}^{D}\big\{ln\frac{x_i}{g(x)} - ln\frac{X_i}{g(X)}\big\}^2 \bigg]^{1/2}$ where $g(x)$ is the geometric mean $[x_1, x_2, ..., x_D]^{1/D}$
Or, equivalently:
$\Delta_s(x, X) = \bigg[ \frac{1}{D} \sum_{i<j}\big\{ln\frac{x_i}{x_j} - ln\frac{X_i}{X_j}\big\}^2 \bigg]^{1/2}$

```
import sys
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import astropy.io.fits as pyfits
import astropy.utils as autils
import requests
import json
import datetime
from pprint import pprint as pp
import os
import astropy.io.fits as fits
import astropy.io.ascii as ascii
import copy
# Jielai added modules
import subprocess
import seaborn as sns
# Time Counter function
import time
def tic():
tic.start = time.perf_counter()
def toc():
elapsed_seconds = time.perf_counter() - tic.start
return elapsed_seconds # fractional
# NOAO server Settings
natroot = 'https://astroarchive.noirlab.edu'
assert natroot == 'https://astroarchive.noirlab.edu', 'Notebook does NOT point to PRODUCTION'
print(f"Using server on {natroot}")
adsurl = f'{natroot}/api/adv_search'
print(f"adsurl = {adsurl}")
apiurl = f'{adsurl}/fasearch/?limit=200000'
print(f'Using API url: {apiurl}')
# Start the timer
print(f'Started on: {str(datetime.datetime.now())}')
tic() # Start timing the run of this notebook
def get_radec_maxmin(RAcentre,DECcentre,search_radius_deg, debug=False):
dec_min = DECcentre - search_radius_deg
dec_max = DECcentre + search_radius_deg
if dec_min<-90.0: dec_min=-90.0
if dec_max>90.0: dec_max=90.0
if dec_min==-90.0 or dec_max==90.0:
ra_min = 0
ra_max = 360.0
else:
costerm = min(np.cos(dec_min*np.pi/180.0),np.cos(dec_max*np.pi/180.0))
ra_min = RAcentre-search_radius_deg*1./costerm
ra_max = RAcentre+search_radius_deg*1./costerm
if ra_min<0: ra_min+=360.0
if ra_max>360.0: ra_max-=360.0
if debug:
print('**** DEBUG: ',dec_min, dec_max)
print('**** DEBUG: ',ra_min, ra_max)
return ra_min,ra_max,dec_min,dec_max
def makedirs_(out_path):
out_dir = '/'.join(out_path.split('/')[0:-1])
os.makedirs(out_dir, exist_ok=True)
return None
pw = 'XXX'
usrname = 'XXX'
search_radius_deg = 0.5
caldat1g = '2022-02-17'
caldat2g = '2022-02-23'
raw_dir = '/Users/jielaizhang/Desktop/' # where downloaded stuff is saved
RA = 203.6800149
DEC = 33.2167887
ra_min,ra_max,dec_min,dec_max = get_radec_maxmin(RA,DEC,search_radius_deg,debug=False)
if ra_min>ra_max:
raise RuntimeError('This needs to be fixed!!')
# Need to perform two searches to account for overlap
# Json needed to make a request to NOAO archive.
jj = {
"outfields" : [
"md5sum","release_date","proposal","archive_filename","original_filename","url",
"proc_type","prod_type","ifilter","exposure",
"ra_center","dec_center","dateobs_min","dateobs_max","caldat","filesize",
"seeing"
],
"search" : [ ["instrument", 'decam'],["telescope", 'ct4m'],["obs_type", 'object'],
['ra_center',ra_min,ra_max],['dec_center',dec_min,dec_max],
["caldat",caldat1g,caldat2g],
["proc_type","instcal","stacked"],['prod_type','image']
]
}
ads_df = pd.DataFrame(requests.post(apiurl,json=jj).json()[1:])
print('Retrieved: ',len(ads_df))
if len(ads_df) != 0:
print(np.unique(ads_df['caldat']))
print(np.unique(ads_df['exposure']))
pd.set_option('display.max_colwidth', -1)
ads_df[['archive_filename','proc_type','exposure','caldat','seeing','url']]
```
```
from newsgac import database
from newsgac import config
from newsgac.data_sources import DataSource
from newsgac.pipelines import Pipeline
from newsgac.nlp_tools.models.frog import Frog
from newsgac.nlp_tools.tasks import frog_process, frogclient
from pynlpl.clients.frogclient import FrogClient
import pandas
import nltk
nltk.download('punkt')
frogclient = FrogClient(
config.frog_hostname,
config.frog_port,
returnall=True,
timeout=1800.0,
)
[d.display_title for d in DataSource.objects.all()]
data_source = DataSource.objects.first()
print data_source.articles[0].raw_text
p = Pipeline.create()
p.lemmatization = False
p.sw_removal = False
p.quote_removal = True
p.nlp_tool = Frog.create()
skp = p.get_sk_pipeline()
skp.steps
skp.steps.pop()
skp.steps.pop()
skp.steps
article_num = 5
cleaned_text = skp.steps[0][1].transform([data_source.articles[article_num].raw_text])[0]
print cleaned_text
unquoted_text = skp.steps[1][1].transformer_list[3][1].steps[0][1].transform([cleaned_text])[0]
print unquoted_text
frog_step = skp.steps[1][1].transformer_list[3][1].steps[1][1]
pandas.DataFrame(frog_step.transform([unquoted_text]), columns=frog_step.get_feature_names())
frog_step.get_feature_names()
frog_step.transform([unquoted_text])[0]
115/1082.
df = pandas.DataFrame(frog_process(unquoted_text))
l = df[df[4].map(lambda c: c[0:1]) == 'B'].groupby(1).size().reset_index().sort_values(1)
df[df[4].map(lambda c: c[0:1]) == 'B']
len(frogclient.process(cleaned_text))
# Unique named entities
tokens = frog_process(unquoted_text)
named_entities = [t for t in tokens if t[4].startswith('B')]
unique_ne_strings = []
ne_strings = set([t[1].lower() for t in named_entities])
for ne_source in ne_strings:
unique = True
for ne_target in [n for n in ne_strings if n != ne_source]:
if ne_target.find(ne_source) > -1:
unique = False
break
if unique:
unique_ne_strings.append(ne_source)
set(sorted(ne_strings)) - set(sorted(unique_ne_strings))
skp.steps
skp.steps[1][1].transformer_list[1][1].transform([cleaned_text])[0]
basic_features = skp.steps[1][1].transformer_list[1][1]  # assumption: the basic-features transformer inspected above
zip(basic_features.get_feature_names(), basic_features.transform([cleaned_text])[0])
from nltk import word_tokenize
len(word_tokenize(cleaned_text))
len(frogclient.process(cleaned_text))
skp.steps[1][1].transformer_list[2][1].transform([cleaned_text])[0]
skp.steps[1][1].transformer_list[2][1].get_feature_names()
2. / 27
from nltk import sent_tokenize
sent_tokenize('....a asd ads asd.. asdsadaasdlkj gfgfdf fdgdf . sdgsd lsdfj with dr. bla die hospital.')
```
# Conformalized quantile regression: A synthetic example (2)
This notebook replicates the second synthetic example (heavy-tailed Cauchy distribution), provided in [1].
In this tutorial we will create synthetic 1-dimensional heteroscedastic data, and compare the usual split conformal prediction [2], its locally weighted variant [3], and the proposed conformalized quantile regression (CQR) [1] alternative. The regression function used in this experiment is random forests.
[1] Yaniv Romano, Evan Patterson, and Emmanuel J. Candes, “Conformalized quantile regression.” 2019.
[2] Papadopoulos Harris, Kostas Proedrou, Volodya Vovk, and Alex Gammerman. “Inductive confidence machines for regression.” In European Conference on Machine Learning, pp. 345-356. Springer, Berlin, Heidelberg, 2002.
[3] Papadopoulos Harris, Alex Gammerman, and Volodya Vovk. “Normalized nonconformity measures for regression conformal prediction.” In Proceedings of the IASTED International Conference on Artificial Intelligence and Applications, pp. 64-69. 2008.
## Toy example
We start by defining the desired miscoverage rate (10% in our case), and some hyper-parameters of random forests. These parameters are shared by the conditional mean and conditional quantile random forests regression.
```
import warnings
warnings.filterwarnings('ignore')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
np.warnings.filterwarnings('ignore')
split_color = 'tomato'
local_color = 'gray'
cqr_color = 'lightblue'
%matplotlib inline
np.random.seed(6)
# desired miscoverage error
alpha = 0.1
# low and high target quantiles
quantiles = [5, 95]
# maximal number of testpoints to plot
max_show = 1000
# save figures?
save_figures = False
# parameters of random forests
n_estimators = 100
min_samples_leaf = 100 # 40
max_features = 1 # 1D signal
random_state = 0
def plot_func(x,
y,
y_u=None,
y_l=None,
pred=None,
shade_color="",
method_name="",
title="",
filename=None,
save_figures=False,
label_observations="Observations",
label_estimate="Predicted value"):
""" Scatter plot of (x,y) points along with the constructed prediction interval
Parameters
----------
x : numpy array, corresponding to the feature of each of the n samples
y : numpy array, target response variable (length n)
pred : numpy array, the estimated prediction. It may be the conditional mean,
or low and high conditional quantiles.
shade_color : string, desired color of the prediction interval
method_name : string, name of the method
title : string, the title of the figure
filename : string, name of the file to save the figure
save_figures : boolean, save the figure (True) or not (False)
"""
x_ = x[:max_show]
y_ = y[:max_show]
if y_u is not None:
y_u_ = y_u[:max_show]
if y_l is not None:
y_l_ = y_l[:max_show]
if pred is not None:
pred_ = pred[:max_show]
fig = plt.figure()
inds = np.argsort(np.squeeze(x_))
plt.plot(x_[inds,:], y_[inds], 'k.', alpha=.2, markersize=10,
fillstyle='none', label=label_observations)
if (y_u is not None) and (y_l is not None):
plt.fill(np.concatenate([x_[inds], x_[inds][::-1]]),
np.concatenate([y_u_[inds], y_l_[inds][::-1]]),
alpha=.3, fc=shade_color, ec='None',
label = method_name + ' prediction interval')
if pred is not None:
if pred_.ndim == 2:
plt.plot(x_[inds,:], pred_[inds,0], 'k', lw=2, alpha=0.9,
label=u'Predicted low and high quantiles')
plt.plot(x_[inds,:], pred_[inds,1], 'k', lw=2, alpha=0.9)
else:
plt.plot(x_[inds,:], pred_[inds], 'k--', lw=2, alpha=0.9,
label=label_estimate)
plt.ylim([-250, 200])
plt.xlabel('$X$')
plt.ylabel('$Y$')
plt.legend(loc='best')
plt.title(title)
if save_figures and (filename is not None):
plt.savefig(filename, bbox_inches='tight', dpi=300)
plt.show()
```
## Generate synthetic data
Here we generate our training and test samples $(X_i,Y_i)$. To generate the training data, we draw $n=2000$ independent, univariate predictor samples $X_i$ from the uniform distribution on the interval $[0,10]$. The response variable is then sampled as $$ Y_i \sim \textrm{Cauchy}(0,6 \sin^2(X_i)),$$ where $\textrm{Cauchy}(0,\gamma)$ is the Cauchy distribution with location parameter $0$ and scale parameter $\gamma$. We generate a test set of 5000 samples in the same way.
```
from scipy.stats import cauchy
# number of training examples
n_train = 2000
# number of test examples (to evaluate average coverage and length)
n_test = 5000
def f(x):
''' Construct data (1D example)
'''
ax = 0*x
for i in range(len(x)):
ax[i] = cauchy.rvs(loc = 0, scale = 6*(np.sin(x[i]))**2, size = 1)
x = ax
return x.astype(np.float32)
# training features
x_train = np.random.uniform(0, 10.0, size=n_train).astype(np.float32)
# test features
x_test = np.random.uniform(0, 10.0, size=n_test).astype(np.float32)
# generate labels
y_train = f(x_train)
y_test = f(x_test)
# reshape the features
x_train = np.reshape(x_train,(n_train,1))
x_test = np.reshape(x_test,(n_test,1))
# display the test data in full range (including the outliers)
fig = plt.figure()
plt.plot(x_test, y_test, 'k.', alpha = 0.3, markersize=10,
fillstyle='none', label=u'Observations')
plt.legend()
plt.xlabel('$X$')
plt.ylabel('$Y$')
if save_figures:
plt.savefig("illustration_test_data_cauchy.png",
bbox_inches='tight', dpi=300)
plt.show()
# display the test data without outliers (zoom in)
plot_func(x_test,y_test,title="Test data (zoom in)")
```
The heteroscedasticity of the data is evident, as the dispersion of $Y$ varies considerably with $X$. The data also contains outliers.
## CQR: Conformalized quantile regression
```
# divide the data into proper training set and calibration set
idx = np.random.permutation(n_train)
n_half = int(np.floor(n_train/2))
idx_train, idx_cal = idx[:n_half], idx[n_half:2*n_half]
from cqr import helper
from nonconformist.nc import RegressorNc
from nonconformist.cp import IcpRegressor
from nonconformist.nc import QuantileRegErrFunc
# define quantile random forests (QRF) parameters
params_qforest = dict()
params_qforest["n_estimators"] = n_estimators
params_qforest["min_samples_leaf"] = min_samples_leaf
params_qforest["max_features"] = max_features
params_qforest["CV"] = True
params_qforest["coverage_factor"] = 1
params_qforest["test_ratio"] = 0.1
params_qforest["random_state"] = random_state
params_qforest["range_vals"] = 10
params_qforest["num_vals"] = 4
# define the QRF model
quantile_estimator = helper.QuantileForestRegressorAdapter(model=None,
fit_params=None,
quantiles=quantiles,
params=params_qforest)
# define the CQR object, computing the absolute residual error of points
# located outside the estimated QRF band
nc = RegressorNc(quantile_estimator, QuantileRegErrFunc())
# build the split CQR object
icp = IcpRegressor(nc)
# fit the conditional quantile regression to the proper training data
icp.fit(x_train[idx_train], y_train[idx_train])
# compute the absolute errors on calibration data
icp.calibrate(x_train[idx_cal], y_train[idx_cal])
# produce predictions for the test set, with confidence equal to significance
predictions = icp.predict(x_test, significance=alpha)
y_lower = predictions[:,0]
y_upper = predictions[:,1]
# compute the low and high conditional quantile estimation
pred = quantile_estimator.predict(x_test)
# display the results
plot_func(x=x_test,y=y_test,y_u=y_upper,y_l=y_lower,pred=pred,shade_color=cqr_color,
method_name="CQR:",title="",
filename="illustration_split_qrf_cauchy.png",save_figures=save_figures,
label_observations="")
# compute and display the average coverage
in_the_range = np.sum((y_test >= y_lower) & (y_test <= y_upper))
print("CQR Random Forests: Percentage in the range (expecting " + str(100*(1-alpha)) + "%):",
in_the_range / len(y_test) * 100)
# compute length of the conformal interval per each test point
length_cqr_rf = y_upper - y_lower
# compute and display the average length
print("CQR Random Forests: Average length:", np.mean(length_cqr_rf))
```
The figure above shows two black curves, representing the lower and upper quantile regression estimates obtained by quantile random forests. The shaded region visualizes the prediction intervals constructed by CQR. As can be seen, our method obtains valid prediction intervals. Notice how the length of the constructed interval varies with $X$, reflecting the uncertainty in the prediction of $Y$.
We now compare the efficiency (average length) of CQR to that of the split conformal method and its locally adaptive variant.
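For intuition, here is a minimal NumPy sketch of the calibration step that the CQR objects above perform internally; the function and variable names are mine, not the `nonconformist` API:
```
import numpy as np

def conformalize_quantile_band(q_lo_cal, q_hi_cal, y_cal, q_lo_test, q_hi_test, alpha=0.1):
    """Adjust an estimated quantile band so it attains finite-sample coverage (CQR)."""
    # conformity score: how far each calibration point falls outside its estimated band
    scores = np.maximum(q_lo_cal - y_cal, y_cal - q_hi_cal)
    n = len(y_cal)
    # (1 - alpha) empirical quantile with the usual finite-sample correction
    k = int(np.ceil((1 - alpha) * (n + 1)))
    q_hat = np.sort(scores)[min(k, n) - 1]  # sketch: ignores the k > n corner case
    # widen (or shrink, if q_hat is negative) the band symmetrically by q_hat
    return q_lo_test - q_hat, q_hi_test + q_hat
```
Because the band being corrected already varies with $X$, the conformalized interval inherits that adaptivity.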
## Split conformal
```
from sklearn.ensemble import RandomForestRegressor
from nonconformist.nc import RegressorNormalizer
from nonconformist.nc import AbsErrorErrFunc
# define the conditional mean estimator as random forests
mean_estimator = RandomForestRegressor(n_estimators=n_estimators,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
random_state=random_state)
# define a conformal prediction object
nc = RegressorNc(mean_estimator, AbsErrorErrFunc())
# build a regular split conformal prediction object
icp = IcpRegressor(nc)
# fit the conditional mean regression to the proper training data
icp.fit(x_train[idx_train], y_train[idx_train])
# compute the absolute residual error on calibration data
icp.calibrate(x_train[idx_cal], y_train[idx_cal])
# produce predictions for the test set, with confidence equal to significance
predictions = icp.predict(x_test, significance=alpha)
y_lower = predictions[:,0]
y_upper = predictions[:,1]
# compute the conditional mean estimation
pred = mean_estimator.predict(x_test)
# display the results
plot_func(x=x_test,y=y_test,y_u=y_upper,y_l=y_lower,pred=pred,shade_color=split_color,
method_name="Split:",title="",
filename="illustration_split_rf.png",save_figures=save_figures,
label_observations="")
# compute and display the average coverage
in_the_range = np.sum((y_test >= y_lower) & (y_test <= y_upper))
print("Random Forests: Percentage in the range (expecting " + str(100*(1-alpha)) + "%):",
in_the_range / len(y_test) * 100)
# compute length of the interval per each test point
length_split_rf = y_upper - y_lower
# compute and display the average length
print("Random Forests: Average length:", np.mean(length_split_rf))
```
As can be seen, the prediction intervals constructed by the split conformal method achieve valid coverage. Notice, however, that their average length is greater than that obtained by CQR.
This experiment reveals a major limitation of the split conformal method: the length of the interval it constructs is fixed and independent of $X$.
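To see why, here is the analogous sketch for plain split conformal, in the same style as the CQR sketch above (names are again mine): the half-width `q_hat` is a single scalar computed from the calibration residuals, so every test point receives an interval of exactly the same length.
```
import numpy as np

def split_conformal_interval(y_hat_cal, y_cal, y_hat_test, alpha=0.1):
    """Plain split conformal: a symmetric band of constant width around the prediction."""
    scores = np.abs(y_cal - y_hat_cal)          # absolute calibration residuals
    n = len(y_cal)
    k = int(np.ceil((1 - alpha) * (n + 1)))
    q_hat = np.sort(scores)[min(k, n) - 1]      # one global half-width
    return y_hat_test - q_hat, y_hat_test + q_hat
```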
## Local conformal
```
# define the conditional mean estimator as random forests (used to predict the labels)
mean_estimator = RandomForestRegressor(n_estimators=n_estimators,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
random_state=random_state)
# define the MAD estimator as random forests (used to scale the absolute residuals)
mad_estimator = RandomForestRegressor(n_estimators=n_estimators,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
random_state=random_state)
# define a conformal normalizer object that uses the two regression functions.
# The nonconformity score is absolute residual error
normalizer = RegressorNormalizer(mean_estimator,
mad_estimator,
AbsErrorErrFunc())
# define the final local conformal object
nc = RegressorNc(mean_estimator, AbsErrorErrFunc(), normalizer)
# build the split local conformal object
icp = IcpRegressor(nc)
# fit the conditional mean and MAD models to proper training data
icp.fit(x_train[idx_train], y_train[idx_train])
# compute the absolute residual error on calibration data
icp.calibrate(x_train[idx_cal], y_train[idx_cal])
# produce predictions for the test set, with confidence equal to significance
predictions = icp.predict(x_test, significance=alpha)
# extract the lower and upper bound of the prediction interval
y_lower = predictions[:,0]
y_upper = predictions[:,1]
# compute the conditional mean estimation
pred = mean_estimator.predict(x_test)
# display the results
plot_func(x=x_test,y=y_test,y_u=y_upper,y_l=y_lower,pred=pred,shade_color=local_color,
method_name="Local (mean):",title="",
filename="illustration_split_local_rf.png",save_figures=save_figures,
label_observations="", label_estimate="")
# compute and display the average coverage
in_the_range = np.sum((y_test >= y_lower) & (y_test <= y_upper))
print("Local Random Forests: Percentage in the range (expecting " + str(100*(1-alpha)) + "%):",
in_the_range / len(y_test) * 100)
# compute length of the interval per each test point
length_local_rf = y_upper - y_lower
# compute and display the average length
print("Local Random Forests: Average length:", np.mean(length_local_rf))
```
The prediction intervals constructed by the locally adaptive split conformal method also achieve valid coverage. They are partially adaptive, and therefore slightly shorter on average than those of the usual split conformal method, but still longer than the ones obtained by CQR.
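In the same spirit, the locally adaptive variant used above can be sketched as follows (names are mine; the `nonconformist` implementation differs in details such as smoothing of the spread estimate). Dividing the residuals by an estimated local spread $\hat{\sigma}(x)$ makes the half-width $\hat{q}\,\hat{\sigma}(x)$ vary with $X$, which is where the partial adaptivity comes from:
```
import numpy as np

def local_conformal_interval(y_hat_cal, sigma_hat_cal, y_cal,
                             y_hat_test, sigma_hat_test, alpha=0.1):
    """Locally weighted split conformal: residuals are scaled by a fitted spread model."""
    scores = np.abs(y_cal - y_hat_cal) / sigma_hat_cal   # normalized nonconformity scores
    n = len(y_cal)
    k = int(np.ceil((1 - alpha) * (n + 1)))
    q_hat = np.sort(scores)[min(k, n) - 1]
    # the half-width q_hat * sigma_hat(x) now changes with x
    return y_hat_test - q_hat * sigma_hat_test, y_hat_test + q_hat * sigma_hat_test
```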
## Local conformal with median regression
To improve robustness to outliers, one might try to estimate the conditional median instead of the conditional mean in locally adaptive conformal prediction. The residuals are scaled in the usual way, by classical regression via random forests. We implement this strategy in the following section.
```
from skgarden import RandomForestQuantileRegressor
from nonconformist.base import RegressorAdapter
# replace conditional mean by conditional median estimator
class MedianRegressorAdapter(RegressorAdapter):
""" Conditional median estimator, defined as quantile random forests (QRF)
References
----------
.. [1] Meinshausen, Nicolai. "Quantile regression forests."
Journal of Machine Learning Research 7.Jun (2006): 983-999.
"""
def __init__(self, model, fit_params=None, quantiles=[50], params=None):
super(MedianRegressorAdapter, self).__init__(model, fit_params)
# Instantiate model
self.quantiles = quantiles
self.cv_quantiles = self.quantiles
self.params = params
self.rfqr = RandomForestQuantileRegressor(random_state=params["random_state"],
min_samples_leaf=params["min_samples_leaf"],
n_estimators=params["n_estimators"],
max_features=params["max_features"])
def fit(self, x, y):
self.rfqr.fit(x, y)
def predict(self, x):
return self.rfqr.predict(x, quantile=50)
# define the conditional median model as random forests regressor (used to predict the labels)
median_estimator = MedianRegressorAdapter(model=None,
fit_params=None,
quantiles=[50],
params=params_qforest)
# define the MAD estimator as usual (mean) random forests regressor (used to scale the absolute residuals)
mad_estimator = RandomForestRegressor(n_estimators=n_estimators,
min_samples_leaf=min_samples_leaf,
max_features=max_features,
random_state=random_state)
# define a conformal normalizer object that uses the two regression functions.
# The nonconformity score is absolute residual error
normalizer = RegressorNormalizer(median_estimator,
mad_estimator,
AbsErrorErrFunc())
# define the final local conformal object
nc = RegressorNc(median_estimator, AbsErrorErrFunc(), normalizer)
# build the split local conformal object
icp = IcpRegressor(nc)
# fit the conditional median and MAD models to the proper training data
icp.fit(x_train[idx_train], y_train[idx_train])
# compute the absolute residual error on calibration data
icp.calibrate(x_train[idx_cal], y_train[idx_cal])
# produce predictions for the test set, with confidence equal to significance
predictions = icp.predict(x_test, significance=alpha)
y_lower = predictions[:,0]
y_upper = predictions[:,1]
# compute the conditional median estimation
pred = median_estimator.predict(x_test)
# display the results
plot_func(x=x_test,y=y_test,y_u=y_upper,y_l=y_lower,pred=pred,shade_color=local_color,
method_name="Local (median):",title="",
filename="illustration_split_local_median_rf.png",save_figures=save_figures,
label_observations="", label_estimate="")
# compute and display the average coverage
in_the_range = np.sum((y_test >= y_lower) & (y_test <= y_upper))
print("Local Median Random Forests: Percentage in the range (expecting " + str(100*(1-alpha)) + "%):",
in_the_range / len(y_test) * 100)
# compute and display the average length
print("Local Median Random Forests: Average length:", np.mean(y_upper - y_lower))
```
```
import rlssm
import pandas as pd
import os
```
#### Import the grouped data
```
par_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
data_path = os.path.join(par_path, 'data/data_experiment.csv')
data = pd.read_csv(data_path, index_col=0)
data = data[data.participant < 5].reset_index(drop=True)
data['block_label'] += 1
data.head()
```
#### Initialise the model
```
model = rlssm.RLModel_2A(hierarchical_levels = 2, increasing_sensitivity=True, separate_learning_rates=True)
model.family, model.model_label, model.hierarchical_levels
model.increasing_sensitivity, model.separate_learning_rates
```
#### Fit
```
# sampling parameters
n_iter = 1000
n_chains = 2
n_thin = 1
# learning parameters
K = 4 # n options
initial_value_learning = 27.5 # initial value (Q0)
# bayesian model
alpha_pos_priors = {'mu_mu':0, 'sd_mu':.8, 'mu_sd':0, 'sd_sd':.5}
# User-Supplied Initial Values:
n_participants = len(data.participant.unique())
def starting_values(chain_id, n_participants=n_participants):
import numpy as np
from scipy import stats
out = {
'mu_alpha_pos': stats.norm.ppf(.1),
'mu_alpha_neg': stats.norm.ppf(.1),
'mu_consistency': np.log(.5),
'mu_scaling': 5,
'sd_alpha_pos':.1,
'sd_alpha_neg':.1,
'sd_consistency':.1,
'sd_scaling':.1,
'z_alpha_pos':list(np.random.normal(0, 1, n_participants)),
'z_alpha_neg':list(np.random.normal(0, 1, n_participants)),
'z_consistency':list(np.random.normal(0, 1, n_participants)),
'z_scaling':list(np.random.normal(0, 1, n_participants))
}
return out
model_fit = model.fit(
data,
K,
initial_value_learning,
alpha_pos_priors = alpha_pos_priors,
thin = n_thin,
iter = n_iter,
chains = n_chains)
```
#### Get Rhat
```
model_fit.rhat.describe()
model_fit.rhat.head()
```
#### Get WAIC
```
model_fit.waic
```
### Posteriors
```
model_fit.samples
model_fit.trial_samples
import seaborn as sns
sns.set(context = "talk",
style = "white",
palette = "husl",
rc={'figure.figsize':(15, 8)})
model_fit.plot_posteriors(height=5, show_intervals="HDI", alpha_intervals=.05);
model_fit.plot_posteriors(height=5, show_intervals="BCI", alpha_intervals=.1, clip=(0, 1));
```
### Posterior predictives
#### Ungrouped
```
pp = model_fit.get_posterior_predictives_df(n_posterior_predictives=500)
pp
pp_summary = model_fit.get_posterior_predictives_summary(n_posterior_predictives=500)
pp_summary
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
model_fit.plot_mean_posterior_predictives(n_posterior_predictives=500, ax=ax, show_intervals='HDI')
ax.set_ylabel('Density')
ax.set_xlabel('Mean accuracy')
sns.despine()
```
#### Grouped
```
import numpy as np
data['choice_pair'] = 'AB'
data.loc[(data.cor_option == 3) & (data.inc_option == 1), 'choice_pair'] = 'AC'
data.loc[(data.cor_option == 4) & (data.inc_option == 2), 'choice_pair'] = 'BD'
data.loc[(data.cor_option == 4) & (data.inc_option == 3), 'choice_pair'] = 'CD'
data['block_bins'] = pd.cut(data.trial_block, 8, labels=np.arange(1, 9))
data.head()
model_fit.get_grouped_posterior_predictives_summary(grouping_vars=['block_label', 'block_bins', 'choice_pair'], n_posterior_predictives=500)
import matplotlib.pyplot as plt
fig, axes = plt.subplots(1, 2, figsize=(20,8))
model_fit.plot_mean_grouped_posterior_predictives(grouping_vars=['block_bins'], n_posterior_predictives=500, ax=axes[0])
model_fit.plot_mean_grouped_posterior_predictives(grouping_vars=['block_bins', 'choice_pair'], n_posterior_predictives=500, ax=axes[1])
sns.despine()
```
### Get last values for eventual further sampling
```
sv = model_fit.last_values
sv
```
```
import sympy as sp
from sympy.plotting import plot
%matplotlib inline
sp.init_printing()
```
# Laboratory work No. 1
<img src="http://civil.engr.siu.edu/cheval/engr351/Images/ENGR351.jpg" width="500px" height="300px" />
### Problem statement
Given a function $f(x)$, we need to find a root of this function, i.e. at least one value of the parameter $x = x_0$ for which $f(x_0) = 0$. If no such value exists, return $null$.
We consider three different methods for solving this problem:
1. The bisection (dichotomy) method
2. Newton's method
3. The simple (fixed-point) iteration method
Each of these methods has its own advantages and drawbacks, so there is no single best method for solving this problem.
To begin, we introduce a couple of commonly used symbols, $\epsilon$ and $x$, as SymPy symbols
```
EPS = sp.Rational("1e-3")
x = sp.Symbol("x")
```
Let us define the function $fun$ for which we are going to look for a root
```
fun = x * x * x - 2 * x
plot(fun, (x, -2, 2))
```
And its derivative $der$, which is needed for some of the methods to work correctly
```
der = sp.diff(fun, x)
plot(der, (x, -2, 2))
```
### Bisection (dichotomy) method
The method halves the interval under consideration at every iteration. A **necessary condition** for applying this method is $f(a) \cdot f(b) \le 0$.
#### Algorithm
We maintain the invariant $f(a) \cdot f(b) \le 0$ on the current interval $[a; b]$ and show that it is preserved at every iteration.
At each iteration the midpoint $m = \frac{a + b}{2}$ is chosen and the condition $f(a) \cdot f(m) \le 0$ is checked.
If it holds, the root lies in the interval $[a; m]$; otherwise the root must be sought in $[m; b]$.
We then call the search function recursively on the chosen subinterval.
```
def dih(a, b, f=fun, eps=EPS):
print("[{}; {}]".format(a, b))
if f.subs(x, a) * f.subs(x, b) > 0:
return None
if a > b:
a, b = b, a
if (b - a).evalf() <= eps / sp.Integer(2):
return a
m = a + (b - a) / sp.Integer(2)
if f.subs(x, a) * f.subs(x, m) <= 0:
return dih(a, m, f, eps)
else:
return dih(m, b, f, eps)
res = dih(a=-5, b=sp.Rational('-0.1'))
"Result {}".format(sp.N(res))
```
### Newton's method
The method iteratively refines an approximation of the root using the tangent-line update $x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)}$, stopping once two successive iterates differ by less than $\frac{\epsilon}{2}$.
```
def newton(x0, f=fun, d=der, eps=EPS):
x1 = x0 - f.subs(x, x0) / d.subs(x, x0)
print(x1)
while sp.Abs(x1 - x0).evalf() > eps / sp.Integer(2):
x0, x1 = x1, x1 - f.subs(x, x1) / d.subs(x, x1)
print(x1)
return x1
res = newton(x0=sp.Rational("0.7"))
"Result {}".format(sp.N(res, 10))
```
### Simple (fixed-point) iteration method
The equation $f(x) = 0$ is rewritten as the fixed-point problem $x = h(x) = x - \alpha f(x)$, and the iteration $x_{n+1} = h(x_n)$ is run until two successive iterates differ by less than $\frac{\epsilon}{2}$.
```
alpha = sp.Symbol("alpha")
h = x - fun * alpha
h
def simple(x0, alpha, f=fun, eps=EPS):
h = x - alpha * f
x1 = h.subs(x, x0)
print("[{}; {}]".format(x0, x1))
while abs(x1 - x0) > eps / sp.Integer(2):
x0, x1 = x1, h.subs(x, x1)
print("[{}; {}]".format(x0, x1))
return x1
res = simple(x0=-3, alpha=1/10)
"Result {}".format(sp.N(res, 10))
```
```
import gym
import time
import math
import random
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import clear_output
######## Create the environment and set up its variables
env = gym.make('FrozenLake-v0')
# env = gym.make('FrozenLake8x8-v0')
action_space_size = env.action_space.n
state_space_size = env.observation_space.n
####### Build the Q table that we'll be using to reference actions
q_table = np.zeros((state_space_size, action_space_size))
q_table
num_episodes = 10_000
max_steps_per_episode = 100
learning_rate = 0.1
discount_rate = 0.99
exploration_rate = 1
max_exploration_rate = 1
min_exploration_rate = 0.01
exploration_decay_rate = 0.001
##### Learning Loop
rewards_all_episodes = []
for episode in range(num_episodes):
state = env.reset()
done = False
rewards_current_episode = 0
for step in range(max_steps_per_episode):
# Exploration-exploitation trade-off
exploration_rate_threshold = random.uniform(0, 1)
if exploration_rate_threshold > exploration_rate:
action = np.argmax(q_table[state,:])
else:
action = env.action_space.sample()
new_state, reward, done, info = env.step(action)
# Update Q-table for Q(s,a)
q_table[state, action] = q_table[state, action] * (1 - learning_rate) + \
learning_rate * (reward + discount_rate * np.max(q_table[new_state, :]))
state = new_state
rewards_current_episode += reward
if done == True:
break
# Exploration rate decay
exploration_rate = min_exploration_rate + \
(max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate*episode)
rewards_all_episodes.append(rewards_current_episode)
rewards_per_thousand = np.split(np.array(rewards_all_episodes),num_episodes/1000)
print('*'*10,' Rewards Per 1000 Episodes ','*'*10,'\n\n')
for i in range(len(rewards_per_thousand)):
print(f'{(i+1) *1000}: {rewards_per_thousand[i].mean()}')
'''
Possible Actions:
Left = 0
Down = 1
Right = 2
Up = 3
'''
n = int(np.sqrt(state_space_size))
policy = np.argmax(q_table,axis=1)
moves = ['L','D','R','U']
solution = np.empty(state_space_size, dtype=str)  # np.str alias is removed in newer NumPy; the builtin str is equivalent
for i,j in enumerate(policy):
solution[i] = moves[j]
t = ''.join(i for i in solution)
print(f'*** Policy \u03C0 ***\n{t[:n]}\n{t[n:2*n]}\n{t[2*n:3*n]}\n{t[3*n:4*n]}')
for e in range(10):
state = env.reset()
done = False
while not done:
env.render()
time.sleep(0.09)
clear_output(wait=True)
action = np.argmax(q_table[state])
new_state,reward,done,_ = env.step(action)
state = new_state
if reward == 1:
print('*** Successful ***')
time.sleep(1)
break
if done:
print('*** Sorry ***')
time.sleep(0.3)
break
```
|
github_jupyter
|
import gym
import time
import math
import random
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import clear_output
######## Create the envisornment and set up its variables
env = gym.make('FrozenLake-v0')
# env = gym.make('FrozenLake8x8-v0')
action_space_size = env.action_space.n
state_space_size = env.observation_space.n
####### Build the Q table that we'll be using to reference actions
q_table = np.zeros((state_space_size, action_space_size))
q_table
num_episodes = 10_000
max_steps_per_episode = 100
learning_rate = 0.1
discount_rate = 0.99
exploration_rate = 1
max_exploration_rate = 1
min_exploration_rate = 0.01
exploration_decay_rate = 0.001
##### Learning Loop
rewards_all_episodes = []
for episode in range(num_episodes):
state = env.reset()
done = False
rewards_current_episode = 0
for step in range(max_steps_per_episode):
# Exploration-exploitation trade-off
exploration_rate_threshold = random.uniform(0, 1)
if exploration_rate_threshold > exploration_rate:
action = np.argmax(q_table[state,:])
else:
action = env.action_space.sample()
new_state, reward, done, info = env.step(action)
# Update Q-table for Q(s,a)
q_table[state, action] = q_table[state, action] * (1 - learning_rate) + \
learning_rate * (reward + discount_rate * np.max(q_table[new_state, :]))
state = new_state
rewards_current_episode += reward
if done == True:
break
# Exploration rate decay
exploration_rate = min_exploration_rate + \
(max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate*episode)
rewards_all_episodes.append(rewards_current_episode)
rewards_per_thousand = np.split(np.array(rewards_all_episodes),num_episodes/1000)
print('*'*10,' Rewards Per 1000 Episode ','*'*10,'\n\n')
for i in range(len(rewards_per_thousand)):
print(f'{(i+1) *1000}: {rewards_per_thousand[i].mean()}')
'''
Possible Actions:
Left = 0
Down = 1
Right = 2
Up = 3
'''
n = int(np.sqrt(state_space_size))
policy = np.argmax(q_table,axis=1)
moves = ['L','D','R','U']
solution = np.empty(state_space_size,dtype=np.str)
for i,j in enumerate(policy):
solution[i] = moves[j]
t = ''.join(i for i in solution)
print(f'*** Policy \u03C0 ***\n{t[:n]}\n{t[n:2*n]}\n{t[2*n:3*n]}\n{t[3*n:4*n]}')
for e in range(10):
state = env.reset()
done = False
while not done:
env.render()
time.sleep(0.09)
clear_output(wait=True)
action = np.argmax(q_table[state])
new_state,reward,done,_ = env.step(action)
state = new_state
if reward == 1:
            print('*** Successful ***')
time.sleep(1)
break
if done:
print('*** Sorry ***')
time.sleep(0.3)
break
| 0.278747 | 0.533337 |
This notebook takes the original bathy_meter.nc and adds two square islands of the given rimwidth at the NW and SW edges
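For reference, the corner masking applied in the cells below can be written as a small helper; this is only a sketch (the function name is hypothetical, and rimwidth = 4 reproduces the hard-coded slices used in this notebook):
```
def add_corner_islands(bathy, rimwidth=4):
    # zero out a square patch at the first rows / first columns ...
    bathy[0:rimwidth, 0:rimwidth] = 0.0
    # ... and at the last rows / first columns, i.e. the two islands described above
    bathy[-rimwidth:, 0:rimwidth] = 0.0
    return bathy
```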
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import netCDF4 as nc
import xarray as xr
import matplotlib.cm as cm
from scipy.interpolate import interp1d
from salishsea_tools import (nc_tools, gsw_calls, viz_tools)
bathy_file = nc.Dataset('/ocean/ssahu/CANYONS/wcvi/grid/bathy_meter.nc');
bathy = bathy_file.variables['Bathymetry'][:];
lon = bathy_file.variables['nav_lon'][:];
lat = bathy_file.variables['nav_lat'][:];
bathy.shape
bathy
bathy[0:4,0:4] = 0.0;
bathy[-4:,0:4] = 0.0;
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
viz_tools.set_aspect(ax)
mesh = ax.pcolormesh(lon, lat, bathy, cmap =cm.ocean)
fig.colorbar(mesh)
plt.show()
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
viz_tools.set_aspect(ax)
mesh = ax.pcolormesh(bathy, cmap =cm.ocean)
fig.colorbar(mesh)
plt.show()
bathy[0,1]
def writebathy(filename,glamt,gphit,bathy):
bnc = nc.Dataset(filename, 'w', clobber=True)
NY,NX = glamt.shape
# Create the dimensions
bnc.createDimension('x', NX)
bnc.createDimension('y', NY)
bnc.createVariable('nav_lon', 'f', ('y', 'x'), zlib=True, complevel=4)
bnc.variables['nav_lon'].setncattr('units', 'degrees_east')
bnc.createVariable('nav_lat', 'f', ('y', 'x'), zlib=True, complevel=4)
bnc.variables['nav_lat'].setncattr('units', 'degrees_north')
bnc.createVariable('Bathymetry', 'd', ('y', 'x'), zlib=True, complevel=4, fill_value=0)
bnc.variables['Bathymetry'].setncattr('units', 'metres')
bnc.variables['nav_lon'][:] = glamt
bnc.variables['nav_lat'][:] = gphit
bnc.variables['Bathymetry'][:] = bathy
bnc.close()
# Write Bathymetry to NetCDF file
writebathy('/ocean/ssahu/CANYONS/wcvi/grid/bathy_files/island_bathy_meter.nc',lon,lat,bathy)
```
### Copy this file to wcvi/grid and rename it bathy_meter.nc to run with the island bathymetry. A copy of the original bathymetry is retained in the bathy_files subdirectory of wcvi/grid
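A minimal sketch of that copy-and-rename step in Python, using the paths written by this notebook (the original bathymetry is already kept in bathy_files, as noted above):
```
import shutil

src = '/ocean/ssahu/CANYONS/wcvi/grid/bathy_files/island_bathy_meter.nc'
dst = '/ocean/ssahu/CANYONS/wcvi/grid/bathy_meter.nc'
shutil.copyfile(src, dst)
```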
#### From the plots in Westcoastattempt38 we find that the way NEMO reads the east and west boundary files is switched the other way around
```
### Let us load the west and east boundary files
west_bdy_sal = nc.Dataset('/ocean/ssahu/CANYONS/bdy_files/3d_NEMO_west_m04.nc').variables['vosaline'][:];
west_bdy_temp = nc.Dataset('/ocean/ssahu/CANYONS/bdy_files/3d_NEMO_west_m04.nc').variables['votemper'][:];
west_bdy_sal.shape
west_bdy_sal[0,0,:,0]
west_bdy_sal = west_bdy_sal[:,:,::-1,:]; #### we have done this while writing the file in the final notebook (where vertical interpolation is also done)
west_bdy_sal[0,0,:,0]
west_bdy_sal.shape
#### just checking on the depth averaged bc
baro_west_ssh = nc.Dataset('/ocean/ssahu/CANYONS/bdy_files/2d_west_m04.nc').variables['sossheig'][:];
baro_west_ssh.shape
baro_west_ssh[0,:,0]
#### We need to switch this too
baro_west_ssh = baro_west_ssh[:,::-1,:];
baro_west_ssh.shape
baro_west_ssh[0,:,0]
```
|
github_jupyter
|
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import netCDF4 as nc
import xarray as xr
import matplotlib.cm as cm
from scipy.interpolate import interp1d
from salishsea_tools import (nc_tools, gsw_calls, viz_tools)
bathy_file = nc.Dataset('/ocean/ssahu/CANYONS/wcvi/grid/bathy_meter.nc');
bathy = bathy_file.variables['Bathymetry'][:];
lon = bathy_file.variables['nav_lon'][:];
lat = bathy_file.variables['nav_lat'][:];
bathy.shape
bathy
bathy[0:4,0:4] = 0.0;
bathy[-4:,0:4] = 0.0;
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
viz_tools.set_aspect(ax)
mesh = ax.pcolormesh(lon, lat, bathy, cmap =cm.ocean)
fig.colorbar(mesh)
plt.show()
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
viz_tools.set_aspect(ax)
mesh = ax.pcolormesh(bathy, cmap =cm.ocean)
fig.colorbar(mesh)
plt.show()
bathy[0,1]
def writebathy(filename,glamt,gphit,bathy):
bnc = nc.Dataset(filename, 'w', clobber=True)
NY,NX = glamt.shape
# Create the dimensions
bnc.createDimension('x', NX)
bnc.createDimension('y', NY)
bnc.createVariable('nav_lon', 'f', ('y', 'x'), zlib=True, complevel=4)
bnc.variables['nav_lon'].setncattr('units', 'degrees_east')
bnc.createVariable('nav_lat', 'f', ('y', 'x'), zlib=True, complevel=4)
bnc.variables['nav_lat'].setncattr('units', 'degrees_north')
bnc.createVariable('Bathymetry', 'd', ('y', 'x'), zlib=True, complevel=4, fill_value=0)
bnc.variables['Bathymetry'].setncattr('units', 'metres')
bnc.variables['nav_lon'][:] = glamt
bnc.variables['nav_lat'][:] = gphit
bnc.variables['Bathymetry'][:] = bathy
bnc.close()
# Write Bathymetry to NetCDF file
writebathy('/ocean/ssahu/CANYONS/wcvi/grid/bathy_files/island_bathy_meter.nc',lon,lat,bathy)
### Let us load the boundary files of the west and east files
west_bdy_sal = nc.Dataset('/ocean/ssahu/CANYONS/bdy_files/3d_NEMO_west_m04.nc').variables['vosaline'][:];
west_bdy_temp = nc.Dataset('/ocean/ssahu/CANYONS/bdy_files/3d_NEMO_west_m04.nc').variables['votemper'][:];
west_bdy_sal.shape
west_bdy_sal[0,0,:,0]
west_bdy_sal = west_bdy_sal[:,:,::-1,:]; #### we have done this while writing the file in the final notebook (where vertical interepolation is also done)
west_bdy_sal[0,0,:,0]
west_bdy_sal.shape
#### just checking on the depth averaged bc
baro_west_ssh = nc.Dataset('/ocean/ssahu/CANYONS/bdy_files/2d_west_m04.nc').variables['sossheig'][:];
baro_west_ssh.shape
baro_west_ssh[0,:,0]
#### We need to switch this too
baro_west_ssh = baro_west_ssh[:,::-1,:];
baro_west_ssh.shape
baro_west_ssh[0,:,0]
| 0.406862 | 0.880438 |
# Modeling a Forest Fire with OOP
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
```
## Modeling the Forest
```
class Forest(object):
def __init__(self, size=(150, 150), p_sapling=0.0025, p_lightning=5.e-6, name=None):
self.size = size
self.trees = np.zeros(self.size, dtype=bool)
self.forest_fires = np.zeros(self.size, dtype=bool)
self.p_sapling = p_sapling
self.p_lightning = p_lightning
if name is not None:
self.name = name
else:
self.name = self.__class__.__name__
@property
def num_cells(self):
return self.size[0] * self.size[1]
@property
def tree_fraction(self):
return self.trees.sum() / float(self.num_cells)
@property
def fire_fraction(self):
return self.forest_fires.sum() / float(self.num_cells)
def advance_one_step(self):
self.grow_trees()
self.start_fires()
self.burn_trees()
def grow_trees(self):
growth_sites = self._rand_bool(self.p_sapling)
self.trees[growth_sites] = True
def start_fires(self):
lightning_strikes = (self._rand_bool(self.p_lightning) &
self.trees)
self.forest_fires[lightning_strikes] = True
def burn_trees(self):
fires = np.zeros((self.size[0] + 2, self.size[1] + 2), dtype=bool)
fires[1:-1, 1:-1] = self.forest_fires
north = fires[:-2, 1:-1]
south = fires[2:, 1:-1]
east = fires[1:-1, :-2]
west = fires[1:-1, 2:]
new_fires = (north | south | east | west) & self.trees
self.trees[self.forest_fires] = False
self.forest_fires = new_fires
def _rand_bool(self, p):
return np.random.uniform(size=self.trees.shape) < p
```
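`burn_trees` spreads fire to the four orthogonal neighbours by padding the fire mask with `False` and taking shifted views of the padded array. A standalone sketch of that neighbour logic on a hypothetical 3×3 grid:
```
import numpy as np

trees = np.ones((3, 3), dtype=bool)          # every cell has a tree
forest_fires = np.zeros((3, 3), dtype=bool)
forest_fires[1, 1] = True                    # fire in the centre cell

fires = np.zeros((5, 5), dtype=bool)         # pad with a ring of False
fires[1:-1, 1:-1] = forest_fires
north = fires[:-2, 1:-1]
south = fires[2:, 1:-1]
east = fires[1:-1, :-2]
west = fires[1:-1, 2:]
new_fires = (north | south | east | west) & trees
print(new_fires.astype(int))
# [[0 1 0]
#  [1 0 1]
#  [0 1 0]]   -> the four neighbours of the burning cell catch fire
```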
Having defined the Forest class, we create a new Forest object:
```
forest = Forest()
```
Display the current state:
```
print(forest.trees)
print(forest.forest_fires)
```
Visualize it with `matshow`:
```
plt.matshow(forest.trees, cmap=plt.cm.Greens)
plt.show()
```
## Simulating Forest Growth and Fires
Advance one time step:
```
forest.advance_one_step()
plt.matshow(forest.trees, cmap=plt.cm.Greens)
plt.show()
```
Loop for many steps:
```
for i in range(500):
forest.advance_one_step()
plt.matshow(forest.trees, cmap=plt.cm.Greens)
print(forest.tree_fraction)
```
Iterate for even longer:
```
forest = Forest()
tree_fractions = []
for i in range(5000):
forest.advance_one_step()
tree_fractions.append(forest.tree_fraction)
fig = plt.figure()
ax0 = fig.add_subplot(1,2,1)
ax0.matshow(forest.trees, cmap=plt.cm.Greens)
ax1 = fig.add_subplot(1,2,2)
ax1.plot(tree_fractions)
plt.show()
```
|
github_jupyter
|
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
class Forest(object):
def __init__(self, size=(150, 150), p_sapling=0.0025, p_lightning=5.e-6, name=None):
self.size = size
self.trees = np.zeros(self.size, dtype=bool)
self.forest_fires = np.zeros(self.size, dtype=bool)
self.p_sapling = p_sapling
self.p_lightning = p_lightning
if name is not None:
self.name = name
else:
self.name = self.__class__.__name__
@property
def num_cells(self):
return self.size[0] * self.size[1]
@property
def tree_fraction(self):
return self.trees.sum() / float(self.num_cells)
@property
def fire_fraction(self):
return self.forest_fires.sum() / float(self.num_cells)
def advance_one_step(self):
self.grow_trees()
self.start_fires()
self.burn_trees()
def grow_trees(self):
growth_sites = self._rand_bool(self.p_sapling)
self.trees[growth_sites] = True
def start_fires(self):
lightning_strikes = (self._rand_bool(self.p_lightning) &
self.trees)
self.forest_fires[lightning_strikes] = True
def burn_trees(self):
fires = np.zeros((self.size[0] + 2, self.size[1] + 2), dtype=bool)
fires[1:-1, 1:-1] = self.forest_fires
north = fires[:-2, 1:-1]
south = fires[2:, 1:-1]
east = fires[1:-1, :-2]
west = fires[1:-1, 2:]
new_fires = (north | south | east | west) & self.trees
self.trees[self.forest_fires] = False
self.forest_fires = new_fires
def _rand_bool(self, p):
return np.random.uniform(size=self.trees.shape) < p
forest = Forest()
print(forest.trees)
print(forest.forest_fires)
plt.matshow(forest.trees, cmap=plt.cm.Greens)
plt.show()
forest.advance_one_step()
plt.matshow(forest.trees, cmap=plt.cm.Greens)
plt.show()
for i in range(500):
forest.advance_one_step()
plt.matshow(forest.trees, cmap=plt.cm.Greens)
print(forest.tree_fraction)
forest = Forest()
tree_fractions = []
for i in range(5000):
forest.advance_one_step()
tree_fractions.append(forest.tree_fraction)
fig = plt.figure()
ax0 = fig.add_subplot(1,2,1)
ax0.matshow(forest.trees, cmap=plt.cm.Greens)
ax1 = fig.add_subplot(1,2,2)
ax1.plot(tree_fractions)
plt.show()
| 0.717309 | 0.829354 |
```
import os
import random
import re
import sys
import string
from collections import Counter, defaultdict
from string import punctuation
from time import sleep
from tqdm.notebook import tqdm
import matplotlib
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import enchant
import nltk
import spacy
from nltk.tag import StanfordNERTagger
from nltk.tokenize import sent_tokenize, word_tokenize
from spacy.lang.en.stop_words import STOP_WORDS
d = enchant.Dict("en_US")
spacy_nlp = spacy.load("en_core_web_sm")
%matplotlib inline
RAW_DATA_PATH = "data/raw/"
SAVE_DATA_PATH = "data/processed/"
```
## Helper Functions
```
def readCorpus(url):
content = requests.get(url).content.decode('ascii', 'ignore')
content_list = sent_tokenize(content.replace('\r\n', ' '))
filtered_list = filterSentences(content_list)
return filtered_list[100:]
def splitData(data, keepnum):
random.seed(123)
split1 = int(keepnum * 0.05)
remain = split1 % 128
split1 += remain
random.shuffle(data)
selectedData = data[0:keepnum]
train = selectedData[split1:]
test = selectedData[0:split1]
return (train, test)
```
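For concreteness, `splitData` holds out roughly 5% of `keepnum` as a test split, padded by `split1 % 128`; with the `keepnum = 32100` used later in this notebook the sizes work out as follows (a quick check, not part of the pipeline):
```
keepnum = 32100
split1 = int(keepnum * 0.05)       # 1605
split1 += split1 % 128             # 1605 + 69 = 1674 test sentences
print(split1, keepnum - split1)    # 1674 30426  -> test size, train size
```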
## ASAP Essays
```
aes_file = RAW_DATA_PATH + "asap-aes/training_set_rel3.tsv"
aes_list = []
with open(aes_file, encoding='utf-8', errors='ignore') as f:
for line in f:
aes_list.append(line.strip().split('\t'))
aes_df = pd.DataFrame(aes_list[1:], columns=aes_list[0])
num_cols = [
'rater1_domain1', 'rater2_domain1', 'rater3_domain1',
'domain1_score', 'rater1_domain2', 'rater2_domain2', 'domain2_score'
]
aes_df[num_cols] = aes_df[num_cols].applymap(
lambda x: np.nan if (x == "") or (x is None) else int(x)
)
aes_df["total_score"] = (aes_df["domain1_score"] + aes_df["domain2_score"].fillna(aes_df["domain1_score"]))/2
cols = ['essay_id', 'essay_set', 'essay', 'domain1_score', 'domain2_score', 'total_score']
aes_df[cols].head()
aes_essays = aes_df.query("total_score > 1")["essay"].values.tolist()
def cleanAES(dataList):
newList = []
for sent in dataList:
sent = re.sub(r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9-_]+)', '', sent)
sent = sent.replace("\\","")
sent = sent.replace("\\'","")
sent = sent.strip().strip("'").strip('"')
if len(sent) < 40:
continue
if '^' in sent:
continue
sent = ' '.join(sent.split())
sent = sent.lower()#.decode('utf8', 'ignore')
newList.append(sent + '\n')
return newList
clean_aes = cleanAES(aes_essays)
d_check = lambda sent: map(lambda x: d.check(x), word_tokenize(sent))
split_aes = []
for essay in tqdm(clean_aes):
split_up = sent_tokenize(essay)
for sent in split_up:
words = word_tokenize(sent)
if not all(list(d_check(sent))):
continue
if len(words) > 30:
continue
if len(words) < 4:
continue
split_aes.append(sent+'\n')
with open(SAVE_DATA_PATH + "aes.txt", 'w') as f:
f.writelines(split_aes)
len(split_aes)
```
## Sophisticated Datasets
```
firstCorpus = [
"http://www.gutenberg.org/cache/epub/5827/pg5827.txt", #Russell, The Problems of Philosophy
"http://www.gutenberg.org/cache/epub/15718/pg15718.txt", #Bleyer, How To Write Special Feature Articles
"https://www.gutenberg.org/files/492/492-0.txt", #Essays in the Art of Writing, by Robert Louis
"https://www.gutenberg.org/files/37090/37090-0.txt", #Our Knowledge of the External World as a Field for Scientific Method in Philosoph, by Bertrand Russell
"https://www.gutenberg.org/files/42580/42580-8.txt", #Expository Writing, by Mervin James Curl
"http://www.gutenberg.org/cache/epub/2529/pg2529.txt", #The Analysis of Mind, by Bertrand Russell
"https://www.gutenberg.org/files/38280/38280-0.txt", #Modern Essays, by Various
"https://www.gutenberg.org/files/205/205-0.txt", #Walden, and On The Duty Of Civil Disobedience, by Henry David Thoreau
"https://www.gutenberg.org/files/1022/1022-0.txt", #Walking, by Henry David Thoreau
"http://www.gutenberg.org/cache/epub/34901/pg34901.txt",
"https://www.gutenberg.org/files/98/98-0.txt",
"http://www.gutenberg.org/cache/epub/32168/pg32168.txt",
"https://www.gutenberg.org/files/766/766-0.txt",
"https://www.gutenberg.org/files/1250/1250-0.txt",
"https://www.gutenberg.org/files/140/140-0.txt",
"https://www.gutenberg.org/files/1400/1400-0.txt",
"https://www.gutenberg.org/files/215/215-0.txt", # London, call of the wild.
"http://www.gutenberg.org/cache/epub/910/pg910.txt", #London White Fang
"https://www.gutenberg.org/files/786/786-0.txt",
"http://www.gutenberg.org/cache/epub/815/pg815.txt",
"http://www.gutenberg.org/cache/epub/10378/pg10378.txt",
"http://www.gutenberg.org/cache/epub/5123/pg5123.txt",
"http://www.gutenberg.org/cache/epub/5669/pg5669.txt"
]
secondCorpus = [
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1906/cardinal-1906.txt?sequence=3&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1658/WoolfWaves-1658.txt?sequence=4&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/0172/moderns-0172.txt?sequence=4&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/3246/3246.txt?sequence=8&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/2042/joywoman-2042.txt?sequence=4&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/3135/3135.txt?sequence=8&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1711/wiseman-1711.txt?sequence=4&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/3245/3245.txt?sequence=8&isAllowed=y",
"http://www.gutenberg.org/cache/epub/5827/pg5827.txt", #Russell, The Problems of Philosophy
"http://www.gutenberg.org/cache/epub/15718/pg15718.txt", #Bleyer, How To Write Special Feature Articles
"https://www.gutenberg.org/files/492/492-0.txt", #Essays in the Art of Writing, by Robert Louis
"https://www.gutenberg.org/files/37090/37090-0.txt", #Our Knowledge of the External World as a Field for Scientific Method in Philosoph, by Bertrand Russell
"https://www.gutenberg.org/files/42580/42580-8.txt", #Expository Writing, by Mervin James Curl
"http://www.gutenberg.org/cache/epub/2529/pg2529.txt", #The Analysis of Mind, by Bertrand Russell
"https://www.gutenberg.org/files/38280/38280-0.txt",
"https://www.gutenberg.org/files/215/215-0.txt", # London, call of the wild.
"http://www.gutenberg.org/cache/epub/910/pg910.txt",
"https://www.gutenberg.org/files/25110/25110-0.txt",
"http://www.gutenberg.org/cache/epub/32168/pg32168.txt",
"http://www.gutenberg.org/cache/epub/16712/pg16712.txt",
"http://www.gutenberg.org/cache/epub/7514/pg7514.txt",
"http://www.gutenberg.org/cache/epub/18477/pg18477.txt",
"http://www.gutenberg.org/cache/epub/5669/pg5669.txt",
"http://www.gutenberg.org/cache/epub/5123/pg5123.txt",
"http://www.gutenberg.org/cache/epub/10378/pg10378.txt",
"https://www.gutenberg.org/files/140/140-0.txt",
"http://www.gutenberg.org/cache/epub/44082/pg44082.txt"
]
def filterSentences(sentList):
filteredList = []
for sent in sentList:
sent = sent.replace("\\","")
sent = sent.replace("\\'","")
if len(sent) < 40:
continue
if '^' in sent:
continue
if bool(re.search(r'\d', sent)):
continue
if bool(re.search(r"\b[A-Z][A-Z]+\b", sent)):
continue
if bool(re.search(r'\"', sent)):
continue
if bool(re.search(r'_', sent)):
continue
sent = sent.strip()
sent = sent.lower()
sent = ' '.join(sent.split())
filteredList.append(sent + '\n')
return filteredList
```
### First Corpus
```
allGuten = []
for url in firstCorpus:
allGuten.append(readCorpus(url))
sum([len(x) for x in allGuten])
allSophs = [y for x in allGuten for y in x]
with open(SAVE_DATA_PATH + "allsophs.txt", 'w') as f:
f.writelines(allSophs)
with open(SAVE_DATA_PATH + "allsophs.txt", 'r') as f:
    allSophs = f.readlines()
```
### Second Corpus
```
allSecondCorpus = []
for url in secondCorpus:
allSecondCorpus.append(readCorpus(url))
sum([len(x) for x in allSecondCorpus])
allSophs = [y for x in allSecondCorpus for y in x]
with open(SAVE_DATA_PATH + "soph_2.txt", 'w') as f:
f.writelines(allSophs)
```
### Second Corpus without Punctuation
```
punctSoph = [y for x in allSecondCorpus for y in x]
# removePunc is defined below, in the "Tagged without Punctuation" section; run that cell first
allSophs = list(map(removePunc, punctSoph))
with open(SAVE_DATA_PATH + "KMW_essays.txt", 'r') as f:
kmw = f.readlines()
with open(SAVE_DATA_PATH + "aes.txt", 'r') as f:
split_aes = f.readlines()
allnaive = kmw + split_aes[0:50000]
allnaive = list(map(removePunc, allnaive))
```
## Hewlett ASAP + Sophisticated with Tokens
### Process ASAP tokens
```
aes_file = RAW_DATA_PATH + "asap-aes/training_set_rel3.tsv"
aes_list = []
with open(aes_file, encoding='utf-8', errors='ignore') as f:
for line in f:
aes_list.append(line.strip().split('\t'))
aes_df = pd.DataFrame(aes_list[1:], columns=aes_list[0])
num_cols = [
'rater1_domain1', 'rater2_domain1', 'rater3_domain1',
'domain1_score', 'rater1_domain2', 'rater2_domain2','domain2_score'
]
aes_df[num_cols] = aes_df[num_cols].applymap(
lambda x: np.nan if (x == "") or (x is None) else int(x)
)
aes_df["total_score"] = (1/2)*(
aes_df["domain1_score"] +
aes_df["domain2_score"].fillna(aes_df["domain1_score"])
)
aes_essays = aes_df.query("total_score > 1")["essay"].values.tolist()
token_regex = r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[0-9]+)'
token_list = []
new_strings = []
# loop variable renamed from `string` so the `string` module imported above is not shadowed
for essay in aes_essays:
    matches = re.findall(token_regex, essay)
    token_list.extend(list(set(matches)))
    essay = essay.replace('@' , '')
    replacement = {x: "<" + re.sub('[0-9]', '', x) + ">" for x in matches}
    for match in matches:
        essay = essay.replace(match, replacement[match])
    new_strings.append(essay)
general_tokens = list(set([re.sub('[0-9]', '', x) for x in token_list]))
general_tokens
def cleanAES(dataList):
newList = []
for sent in dataList:
sent = sent.replace("\\'","")
sent = sent.strip().strip("'").strip('"')
sent = sent.replace("'", "")
sent = sent.replace('"', "")
sent = re.sub('([.,!?()])', r' \1 ', sent)
sent = re.sub('\s{2,}', ' ', sent)
sent = sent.replace(">s", ">")
sent = sent.strip()
if len(sent) < 40:
continue
if '^' in sent:
continue
sent = ' '.join(sent.split())
sent = sent.lower()
newList.append(sent + '\n')
return newList
clean_aes = cleanAES(new_strings)
d_check = lambda words: map(lambda x: d.check(x), words)
split_aes = []
for essay in tqdm(clean_aes):
split_up = sent_tokenize(essay)
for sent in split_up:
words_ex_tokens = [x for x in sent.split() if x.upper().strip('<').strip('>') not in general_tokens]
words = word_tokenize(' '.join(words_ex_tokens))
if not all(list(d_check(words))):
continue
if len(words) > 30:
continue
if len(words) < 4:
continue
split_aes.append(sent+'\n')
len(split_aes)
```
### Gutenberg + Oxford
```
taggedCorpus = [
"http://www.gutenberg.org/cache/epub/5827/pg5827.txt", #Russell, The Problems of Philosophy
"http://www.gutenberg.org/cache/epub/15718/pg15718.txt", #Bleyer, How To Write Special Feature Articles
"https://www.gutenberg.org/files/492/492-0.txt", #Essays in the Art of Writing, by Robert Louis
"https://www.gutenberg.org/files/37090/37090-0.txt", #Our Knowledge of the External World as a Field for Scientific Method in Philosoph, by Bertrand Russell
"https://www.gutenberg.org/files/42580/42580-8.txt", #Expository Writing, by Mervin James Curl
"http://www.gutenberg.org/cache/epub/2529/pg2529.txt", #The Analysis of Mind, by Bertrand Russell
"https://www.gutenberg.org/files/38280/38280-0.txt", #Modern Essays, by Various
"https://www.gutenberg.org/files/205/205-0.txt", #Walden, and On The Duty Of Civil Disobedience, by Henry David Thoreau
"https://www.gutenberg.org/files/1022/1022-0.txt", #Walking, by Henry David Thoreau
"http://www.gutenberg.org/cache/epub/34901/pg34901.txt",
"https://www.gutenberg.org/files/98/98-0.txt",
"http://www.gutenberg.org/cache/epub/32168/pg32168.txt",
"https://www.gutenberg.org/files/1250/1250-0.txt",
"https://www.gutenberg.org/files/140/140-0.txt",
"https://www.gutenberg.org/files/215/215-0.txt", # London, call of the wild.
"http://www.gutenberg.org/cache/epub/910/pg910.txt", #London White Fang
"http://www.gutenberg.org/cache/epub/10378/pg10378.txt",
"http://www.gutenberg.org/cache/epub/5123/pg5123.txt",
"http://www.gutenberg.org/cache/epub/5669/pg5669.txt",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1906/cardinal-1906.txt?sequence=3&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1658/WoolfWaves-1658.txt?sequence=4&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/0172/moderns-0172.txt?sequence=4&isAllowed=y",
]
def filterSentences(sentList):
filteredList = []
for sent in sentList:
sent = sent.replace("\\","")
sent = sent.replace("\\'","")
if len(sent) < 40:
continue
if '^' in sent:
continue
if bool(re.search(r"\b[A-Z][A-Z]+\b", sent)):
continue
if bool(re.search(r'_', sent)):
continue
sent = sent.strip()
sent = ' '.join(sent.split())
filteredList.append(sent + '\n')
return filteredList
taggedTexts = []
for url in taggedCorpus:
taggedTexts.append(readCorpus(url))
sum([len(x) for x in taggedTexts])
allTaggedSophs = [y for x in taggedTexts for y in x]
```
#### StanfordNERTagger
```
jar = "stanford-ner-2018-10-16/stanford-ner-3.9.2.jar"
model = "stanford-ner-2018-10-16/classifiers/english.muc.7class.distsim.crf.ser.gz"
st = StanfordNERTagger(model, jar)
TAG_HASH = {}
def tagSentence(sent):
tokenize = word_tokenize(sent)
tagged = st.tag(tokenize)
tokens = dict([x for x in tagged if x[1] != 'O'])
tokens = {x: "<" + y + ">" for x,y in tokens.items() }
new_sent = [tokens.get(x, x) for x in tokenize]
for word, repl in tokens.items():
TAG_HASH[word] = repl
return(' '.join(new_sent) + '\n')
tagged_write_path = SAVE_DATA_PATH + 'tagged_data/'
batches = [x for x in range(len(allTaggedSophs)) if x % 1000 == 0]
batches.append(len(allTaggedSophs))
for i in tqdm(range(len(batches)-1)):
start = batches[i]
stop = batches[i+1]
batch = allTaggedSophs[start:stop]
stanfordTaggedSophs = []
    for sent in tqdm(batch):
stanfordTaggedSophs.append(tagSentence(sent))
with open(tagged_write_path + f'batch{i}', 'w') as f:
f.writelines(stanfordTaggedSophs)
TAG_HASH
```
#### spacy NER
```
#https://spacy.io/api/annotation#named-entities
spacy2stanford = {
'NORP': 'CAPS',
'FAC': 'LOCATION',
'ORG': 'ORGANIZATION',
'GPE': 'STATE',
'LOC': 'LOCATION',
'PRODUCT': 'CAPS',
'EVENT': 'CAPS',
'WORK_OF_ART': 'CAPS',
'LAW': 'CAPS',
'LANGUAGE': 'CAPS',
'QUANTITY': 'NUM',
'ORDINAL': 'NUM',
'CARDINAL': 'NUM'
}
spacy_write_path = SAVE_DATA_PATH + 'tagged_data/'
SPACY_TOKENS = {}
def spacyTagger(sent):
tokenize = word_tokenize(sent)
document = spacy_nlp(sent)
token_map = {}
for element in document.ents:
label = spacy2stanford.get(str(element.label_), str(element.label_))
SPACY_TOKENS[str(element)] = label
token_map[str(element)] = "<" + label + ">"
new_sent = [token_map.get(x, x) for x in tokenize]
return(' '.join(new_sent) + '\n')
spacyTaggedSophs = []
for sent in tqdm(allTaggedSophs):
spacyTaggedSophs.append(spacyTagger(sent))
with open(spacy_write_path + f'spacyTaggedSophs.txt', 'w') as f:
f.writelines(spacyTaggedSophs)
with open(spacy_write_path + 'spacyTaggedSophs.txt') as f:
tagged = f.readlines()
tagged = list(map(str.lower, tagged))
keepshort = []
for sent in tagged:
words = len(sent.split(' '))
if words <= 30:
keepshort.append(sent)
len(keepshort)
keepnum = 32100
sophstrain, sophtest = splitData(keepshort, keepnum)
print(len(sophtest))
print(len(sophstrain))
with open(SAVE_DATA_PATH + "soph_test_tagged.txt", 'w') as f:
f.writelines(sophtest)
with open(SAVE_DATA_PATH + "soph_train_tagged.txt", 'w') as f:
f.writelines(sophstrain)
allnaive = [x for x in split_aes if len(x) > 20]
naiveshort = []
for sent in allnaive:
words = len(sent.split(' '))
if words <= 30:
naiveshort.append(sent)
len(naiveshort)
naivetrain, naivetest = splitData(naiveshort, keepnum)
print(len(naivetest))
print(len(naivetrain))
with open(SAVE_DATA_PATH + "naive_test_tagged.txt", 'w') as f:
f.writelines(naivetest)
with open(SAVE_DATA_PATH + "naive_train_tagged.txt", 'w') as f:
f.writelines(naivetrain)
```
### Tagged without Punctuation
```
def removePunc(sent):
punct = string.punctuation.replace('<', '').replace('>', '')
sent = re.sub('['+punct+']', '', sent)
sent = ' '.join(sent.split())
return(sent + '\n')
tagged_nopunct = list(map(removePunc, tagged))
split_aes_nopunct = list(map(removePunc, split_aes))
short_soph_tagged = []
for sent in tagged_nopunct:
words = len(sent.split(' '))
if words <= 30:
short_soph_tagged.append(sent)
sophstrain, sophtest = splitData(short_soph_tagged, keepnum)
print(len(sophtest))
print(len(sophstrain))
with open(SAVE_DATA_PATH + "soph_test_tagged_nopunct.txt", 'w') as f:
f.writelines(sophtest)
with open(SAVE_DATA_PATH + "soph_train_tagged_nopunct.txt", 'w') as f:
f.writelines(sophstrain)
allnaive = [x for x in split_aes_nopunct if len(x) > 20]
naiveshort_tag_np = []
for sent in allnaive:
words = len(sent.split(' '))
if words <= 30:
naiveshort_tag_np.append(sent)
naivetrain, naivetest = splitData(naiveshort_tag_np, keepnum)
with open(SAVE_DATA_PATH + "naive_test_tagged_nopunct.txt", 'w') as f:
f.writelines(naivetest)
with open(SAVE_DATA_PATH + "naive_train_tagged_nopunct.txt", 'w') as f:
f.writelines(naivetrain)
```
## My Kids Way Essays
```
# Note: `text` is not defined in this cell; this is leftover scratch from developing getText() below
data = []
for item in text:
data.append(item.get_text().split('\n'))
paginated_links = "https://www.mykidsway.com/essays/page/{}/"
all_essays = req = requests.get("https://www.mykidsway.com/essays/")
essay_html = BeautifulSoup(all_essays.content, 'html.parser')
divs = essay_html.find_all("div", class_="hovereffect")
all_links = []
for content in divs:
all_links.append(content.find("a").get("href"))
for i in range(1,20):
new_page = paginated_links.format(str(i))
all_essays = req = requests.get(new_page)
essay_html = BeautifulSoup(all_essays.content, 'html.parser')
divs = essay_html.find_all("div", class_="hovereffect")
for content in divs:
all_links.append(content.find("a").get("href"))
sleep(1)
def getText(link):
req = requests.get(link)
soup = BeautifulSoup(req.content, 'html.parser')
text = soup.find_all("span", itemprop="description")
data = []
for item in text:
split_text = item.get_text().split('\n')
total_len = sum([len(x) for x in split_text])
if total_len > 2000:
print("skipping ", link)
continue
for sentence in split_text:
data.append(sentence)
return data
all_sentences = []
for link in set(all_links):
print(link)
data_list = getText(link)
for sentence in data_list:
all_sentences.append(sentence)
sleep(1)
def cleanKMW(data_list):
newList = []
for sent in data_list:
if len(sent) < 40:
continue
if '^' in sent:
continue
if bool(re.search(r'\d', sent)):
continue
sent = sent.lower()
newList.append(sent + '\n')
return newList
cleanedKMW = cleanKMW(all_sentences)
reordered = []
for sent in cleanedKMW:
split_sent = sent_tokenize(sent.strip())
for sentence in split_sent:
if (len(word_tokenize(sentence)) > 20) or (len(word_tokenize(sentence)) < 4):
continue
reordered.append(sentence + '\n')
with open(SAVE_DATA_PATH + "KMW_essays.txt", 'w') as f:
f.writelines(reordered)
```
### Data Saving Process
```
def limitLength(dataList, maxlen):
keepshort = []
for sent in dataList:
words = len(word_tokenize(sent))
if words <= maxlen:
keepshort.append(sent)
return keepshort
def writeOut(data, fileroot, maxlen):
    keepshort = limitLength(data, maxlen)
train, test = splitData(keepshort, keepnum)
with open(SAVE_DATA_PATH + f"test_{fileroot}.txt", 'w') as f:
f.writelines(test)
with open(SAVE_DATA_PATH + f"train_{fileroot}.txt", 'w') as f:
f.writelines(train)
with open(SAVE_DATA_PATH + "KMW_essays.txt", 'r') as f:
kmw = f.readlines()
with open(SAVE_DATA_PATH + "aes.txt", 'r') as f:
split_aes = f.readlines()
allnaive = kmw + split_aes[0:50000]
writeOut(allnaive, "naive_3", 35)
```
|
github_jupyter
|
import os
import random
import re
import sys
import string
from collections import Counter, defaultdict
from string import punctuation
from time import sleep
from tqdm.notebook import tqdm
import matplotlib
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import enchant
import nltk
import spacy
from nltk.tag import StanfordNERTagger
from nltk.tokenize import sent_tokenize, word_tokenize
from spacy.lang.en.stop_words import STOP_WORDS
d = enchant.Dict("en_US")
spacy_nlp = spacy.load("en_core_web_sm")
%matplotlib inline
RAW_DATA_PATH = "data/raw/"
SAVE_DATA_PATH = "data/processed/"
def readCorpus(url):
content = requests.get(url).content.decode('ascii', 'ignore')
content_list = sent_tokenize(content.replace('\r\n', ' '))
filtered_list = filterSentences(content_list)
return filtered_list[100:]
def splitData(data, keepnum):
random.seed(123)
split1 = int(keepnum * 0.05)
remain = split1 % 128
split1 += remain
random.shuffle(data)
selectedData = data[0:keepnum]
train = selectedData[split1:]
test = selectedData[0:split1]
return (train, test)
aes_file = RAW_DATA_PATH + "asap-aes/training_set_rel3.tsv"
aes_list = []
with open(aes_file, encoding='utf-8', errors='ignore') as f:
for line in f:
aes_list.append(line.strip().split('\t'))
aes_df = pd.DataFrame(aes_list[1:], columns=aes_list[0])
num_cols = [
'rater1_domain1', 'rater2_domain1', 'rater3_domain1',
'domain1_score', 'rater1_domain2', 'rater2_domain2', 'domain2_score'
]
aes_df[num_cols] = aes_df[num_cols].applymap(
lambda x: np.nan if (x == "") or (x is None) else int(x)
)
aes_df["total_score"] = (aes_df["domain1_score"] + aes_df["domain2_score"].fillna(aes_df["domain1_score"]))/2
cols = ['essay_id', 'essay_set', 'essay', 'domain1_score', 'domain2_score', 'total_score']
aes_df[cols].head()
aes_essays = aes_df.query("total_score > 1")["essay"].values.tolist()
def cleanAES(dataList):
newList = []
for sent in dataList:
sent = re.sub(r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9-_]+)', '', sent)
sent = sent.replace("\\","")
sent = sent.replace("\\'","")
sent = sent.strip().strip("'").strip('"')
if len(sent) < 40:
continue
if '^' in sent:
continue
sent = ' '.join(sent.split())
sent = sent.lower()#.decode('utf8', 'ignore')
newList.append(sent + '\n')
return newList
clean_aes = cleanAES(aes_essays)
d_check = lambda sent: map(lambda x: d.check(x), word_tokenize(sent))
split_aes = []
for essay in tqdm(clean_aes):
split_up = sent_tokenize(essay)
for sent in split_up:
words = word_tokenize(sent)
if not all(list(d_check(sent))):
continue
if len(words) > 30:
continue
if len(words) < 4:
continue
split_aes.append(sent+'\n')
with open(SAVE_DATA_PATH + "aes.txt", 'w') as f:
f.writelines(split_aes)
len(split_aes)
firstCorpus = [
"http://www.gutenberg.org/cache/epub/5827/pg5827.txt", #Russell, The Problems of Philosophy
"http://www.gutenberg.org/cache/epub/15718/pg15718.txt", #Bleyer, How To Write Special Feature Articles
"https://www.gutenberg.org/files/492/492-0.txt", #Essays in the Art of Writing, by Robert Louis
"https://www.gutenberg.org/files/37090/37090-0.txt", #Our Knowledge of the External World as a Field for Scientific Method in Philosoph, by Bertrand Russell
"https://www.gutenberg.org/files/42580/42580-8.txt", #Expository Writing, by Mervin James Curl
"http://www.gutenberg.org/cache/epub/2529/pg2529.txt", #The Analysis of Mind, by Bertrand Russell
"https://www.gutenberg.org/files/38280/38280-0.txt", #Modern Essays, by Various
"https://www.gutenberg.org/files/205/205-0.txt", #Walden, and On The Duty Of Civil Disobedience, by Henry David Thoreau
"https://www.gutenberg.org/files/1022/1022-0.txt", #Walking, by Henry David Thoreau
"http://www.gutenberg.org/cache/epub/34901/pg34901.txt",
"https://www.gutenberg.org/files/98/98-0.txt",
"http://www.gutenberg.org/cache/epub/32168/pg32168.txt",
"https://www.gutenberg.org/files/766/766-0.txt",
"https://www.gutenberg.org/files/1250/1250-0.txt",
"https://www.gutenberg.org/files/140/140-0.txt",
"https://www.gutenberg.org/files/1400/1400-0.txt",
"https://www.gutenberg.org/files/215/215-0.txt", # London, call of the wild.
"http://www.gutenberg.org/cache/epub/910/pg910.txt", #London White Fang
"https://www.gutenberg.org/files/786/786-0.txt",
"http://www.gutenberg.org/cache/epub/815/pg815.txt",
"http://www.gutenberg.org/cache/epub/10378/pg10378.txt",
"http://www.gutenberg.org/cache/epub/5123/pg5123.txt",
"http://www.gutenberg.org/cache/epub/5669/pg5669.txt"
]
secondCorpus = [
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1906/cardinal-1906.txt?sequence=3&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1658/WoolfWaves-1658.txt?sequence=4&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/0172/moderns-0172.txt?sequence=4&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/3246/3246.txt?sequence=8&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/2042/joywoman-2042.txt?sequence=4&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/3135/3135.txt?sequence=8&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1711/wiseman-1711.txt?sequence=4&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/3245/3245.txt?sequence=8&isAllowed=y",
"http://www.gutenberg.org/cache/epub/5827/pg5827.txt", #Russell, The Problems of Philosophy
"http://www.gutenberg.org/cache/epub/15718/pg15718.txt", #Bleyer, How To Write Special Feature Articles
"https://www.gutenberg.org/files/492/492-0.txt", #Essays in the Art of Writing, by Robert Louis
"https://www.gutenberg.org/files/37090/37090-0.txt", #Our Knowledge of the External World as a Field for Scientific Method in Philosoph, by Bertrand Russell
"https://www.gutenberg.org/files/42580/42580-8.txt", #Expository Writing, by Mervin James Curl
"http://www.gutenberg.org/cache/epub/2529/pg2529.txt", #The Analysis of Mind, by Bertrand Russell
"https://www.gutenberg.org/files/38280/38280-0.txt",
"https://www.gutenberg.org/files/215/215-0.txt", # London, call of the wild.
"http://www.gutenberg.org/cache/epub/910/pg910.txt",
"https://www.gutenberg.org/files/25110/25110-0.txt",
"http://www.gutenberg.org/cache/epub/32168/pg32168.txt",
"http://www.gutenberg.org/cache/epub/16712/pg16712.txt",
"http://www.gutenberg.org/cache/epub/7514/pg7514.txt",
"http://www.gutenberg.org/cache/epub/18477/pg18477.txt",
"http://www.gutenberg.org/cache/epub/5669/pg5669.txt",
"http://www.gutenberg.org/cache/epub/5123/pg5123.txt",
"http://www.gutenberg.org/cache/epub/10378/pg10378.txt",
"https://www.gutenberg.org/files/140/140-0.txt",
"http://www.gutenberg.org/cache/epub/44082/pg44082.txt"
]
def filterSentences(sentList):
filteredList = []
for sent in sentList:
sent = sent.replace("\\","")
sent = sent.replace("\\'","")
if len(sent) < 40:
continue
if '^' in sent:
continue
if bool(re.search(r'\d', sent)):
continue
if bool(re.search(r"\b[A-Z][A-Z]+\b", sent)):
continue
if bool(re.search(r'\"', sent)):
continue
if bool(re.search(r'_', sent)):
continue
sent = sent.strip()
sent = sent.lower()
sent = ' '.join(sent.split())
filteredList.append(sent + '\n')
return filteredList
allGuten = []
for url in firstCorpus:
allGuten.append(readCorpus(url))
sum([len(x) for x in allGuten])
allSophs = [y for x in allGuten for y in x]
with open(SAVE_DATA_PATH + "allsophs.txt", 'w') as f:
f.writelines(allSophs)
with open(SAVE_DATA_PATH + "allsophs.txt", 'r') as f:
    allSophs = f.readlines()
allSecondCorpus = []
for url in secondCorpus:
allSecondCorpus.append(readCorpus(url))
sum([len(x) for x in allSecondCorpus])
allSophs = [y for x in allSecondCorpus for y in x]
with open(SAVE_DATA_PATH + "soph_2.txt", 'w') as f:
f.writelines(allSophs)
punctSoph = [y for x in allSecondCorpus for y in x]
allSophs = list(map(removePunc, punctSoph))
with open(SAVE_DATA_PATH + "KMW_essays.txt", 'r') as f:
kmw = f.readlines()
with open(SAVE_DATA_PATH + "aes.txt", 'r') as f:
split_aes = f.readlines()
allnaive = kmw + split_aes[0:50000]
allnaive = list(map(removePunc, allnaive))
aes_file = RAW_DATA_PATH + "asap-aes/training_set_rel3.tsv"
aes_list = []
with open(aes_file, encoding='utf-8', errors='ignore') as f:
for line in f:
aes_list.append(line.strip().split('\t'))
aes_df = pd.DataFrame(aes_list[1:], columns=aes_list[0])
num_cols = [
'rater1_domain1', 'rater2_domain1', 'rater3_domain1',
'domain1_score', 'rater1_domain2', 'rater2_domain2','domain2_score'
]
aes_df[num_cols] = aes_df[num_cols].applymap(
lambda x: np.nan if (x == "") or (x is None) else int(x)
)
aes_df["total_score"] = (1/2)*(
aes_df["domain1_score"] +
aes_df["domain2_score"].fillna(aes_df["domain1_score"])
)
aes_essays = aes_df.query("total_score > 1")["essay"].values.tolist()
token_regex = r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[0-9]+)'
token_list = []
new_strings = []
for string in aes_essays:
matches = re.findall(token_regex, string)
token_list.extend(list(set(matches)))
string = string.replace('@' , '')
replacement = {x: "<" + re.sub('[0-9]', '', x) + ">" for x in matches}
for match in matches:
string = string.replace(match, replacement[match])
new_strings.append(string)
general_tokens = list(set([re.sub('[0-9]', '', x) for x in token_list]))
general_tokens
def cleanAES(dataList):
newList = []
for sent in dataList:
sent = sent.replace("\\'","")
sent = sent.strip().strip("'").strip('"')
sent = sent.replace("'", "")
sent = sent.replace('"', "")
sent = re.sub('([.,!?()])', r' \1 ', sent)
sent = re.sub('\s{2,}', ' ', sent)
sent = sent.replace(">s", ">")
sent = sent.strip()
if len(sent) < 40:
continue
if '^' in sent:
continue
sent = ' '.join(sent.split())
sent = sent.lower()
newList.append(sent + '\n')
return newList
clean_aes = cleanAES(new_strings)
d_check = lambda words: map(lambda x: d.check(x), words)
split_aes = []
for essay in tqdm(clean_aes):
split_up = sent_tokenize(essay)
for sent in split_up:
words_ex_tokens = [x for x in sent.split() if x.upper().strip('<').strip('>') not in general_tokens]
words = word_tokenize(' '.join(words_ex_tokens))
if not all(list(d_check(words))):
continue
if len(words) > 30:
continue
if len(words) < 4:
continue
split_aes.append(sent+'\n')
len(split_aes)
taggedCorpus = [
"http://www.gutenberg.org/cache/epub/5827/pg5827.txt", #Russell, The Problems of Philosophy
"http://www.gutenberg.org/cache/epub/15718/pg15718.txt", #Bleyer, How To Write Special Feature Articles
"https://www.gutenberg.org/files/492/492-0.txt", #Essays in the Art of Writing, by Robert Louis
"https://www.gutenberg.org/files/37090/37090-0.txt", #Our Knowledge of the External World as a Field for Scientific Method in Philosoph, by Bertrand Russell
"https://www.gutenberg.org/files/42580/42580-8.txt", #Expository Writing, by Mervin James Curl
"http://www.gutenberg.org/cache/epub/2529/pg2529.txt", #The Analysis of Mind, by Bertrand Russell
"https://www.gutenberg.org/files/38280/38280-0.txt", #Modern Essays, by Various
"https://www.gutenberg.org/files/205/205-0.txt", #Walden, and On The Duty Of Civil Disobedience, by Henry David Thoreau
"https://www.gutenberg.org/files/1022/1022-0.txt", #Walking, by Henry David Thoreau
"http://www.gutenberg.org/cache/epub/34901/pg34901.txt",
"https://www.gutenberg.org/files/98/98-0.txt",
"http://www.gutenberg.org/cache/epub/32168/pg32168.txt",
"https://www.gutenberg.org/files/1250/1250-0.txt",
"https://www.gutenberg.org/files/140/140-0.txt",
"https://www.gutenberg.org/files/215/215-0.txt", # London, call of the wild.
"http://www.gutenberg.org/cache/epub/910/pg910.txt", #London White Fang
"http://www.gutenberg.org/cache/epub/10378/pg10378.txt",
"http://www.gutenberg.org/cache/epub/5123/pg5123.txt",
"http://www.gutenberg.org/cache/epub/5669/pg5669.txt",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1906/cardinal-1906.txt?sequence=3&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/1658/WoolfWaves-1658.txt?sequence=4&isAllowed=y",
"https://ota.bodleian.ox.ac.uk/repository/xmlui/bitstream/handle/20.500.12024/0172/moderns-0172.txt?sequence=4&isAllowed=y",
]
def filterSentences(sentList):
filteredList = []
for sent in sentList:
sent = sent.replace("\\","")
sent = sent.replace("\\'","")
if len(sent) < 40:
continue
if '^' in sent:
continue
if bool(re.search(r"\b[A-Z][A-Z]+\b", sent)):
continue
if bool(re.search(r'_', sent)):
continue
sent = sent.strip()
sent = ' '.join(sent.split())
filteredList.append(sent + '\n')
return filteredList
taggedTexts = []
for url in taggedCorpus:
taggedTexts.append(readCorpus(url))
sum([len(x) for x in taggedTexts])
allTaggedSophs = [y for x in taggedTexts for y in x]
jar = "stanford-ner-2018-10-16/stanford-ner-3.9.2.jar"
model = "stanford-ner-2018-10-16/classifiers/english.muc.7class.distsim.crf.ser.gz"
st = StanfordNERTagger(model, jar)
TAG_HASH = {}
def tagSentence(sent):
tokenize = word_tokenize(sent)
tagged = st.tag(tokenize)
tokens = dict([x for x in tagged if x[1] != 'O'])
tokens = {x: "<" + y + ">" for x,y in tokens.items() }
new_sent = [tokens.get(x, x) for x in tokenize]
for word, repl in tokens.items():
TAG_HASH[word] = repl
return(' '.join(new_sent) + '\n')
tagged_write_path = SAVE_DATA_PATH + 'tagged_data/'
batches = [x for x in range(len(allTaggedSophs)) if x % 1000 == 0]
batches.append(len(allTaggedSophs))
for i in tqdm(range(len(batches)-1)):
start = batches[i]
stop = batches[i+1]
batch = allTaggedSophs[start:stop]
stanfordTaggedSophs = []
    for sent in tqdm(batch):
stanfordTaggedSophs.append(tagSentence(sent))
with open(tagged_write_path + f'batch{i}', 'w') as f:
f.writelines(stanfordTaggedSophs)
TAG_HASH
#https://spacy.io/api/annotation#named-entities
spacy2stanford = {
'NORP': 'CAPS',
'FAC': 'LOCATION',
'ORG': 'ORGANIZATION',
'GPE': 'STATE',
'LOC': 'LOCATION',
'PRODUCT': 'CAPS',
'EVENT': 'CAPS',
'WORK_OF_ART': 'CAPS',
'LAW': 'CAPS',
'LANGUAGE': 'CAPS',
'QUANTITY': 'NUM',
'ORDINAL': 'NUM',
'CARDINAL': 'NUM'
}
spacy_write_path = SAVE_DATA_PATH + 'tagged_data/'
SPACY_TOKENS = {}
def spacyTagger(sent):
tokenize = word_tokenize(sent)
document = spacy_nlp(sent)
token_map = {}
for element in document.ents:
label = spacy2stanford.get(str(element.label_), str(element.label_))
SPACY_TOKENS[str(element)] = label
token_map[str(element)] = "<" + label + ">"
new_sent = [token_map.get(x, x) for x in tokenize]
return(' '.join(new_sent) + '\n')
spacyTaggedSophs = []
for sent in tqdm(allTaggedSophs):
spacyTaggedSophs.append(spacyTagger(sent))
with open(spacy_write_path + f'spacyTaggedSophs.txt', 'w') as f:
f.writelines(spacyTaggedSophs)
with open(spacy_write_path + 'spacyTaggedSophs.txt') as f:
tagged = f.readlines()
tagged = list(map(str.lower, tagged))
keepshort = []
for sent in tagged:
words = len(sent.split(' '))
if words <= 30:
keepshort.append(sent)
len(keepshort)
keepnum = 32100
sophstrain, sophtest = splitData(keepshort, keepnum)
print(len(sophtest))
print(len(sophstrain))
with open(SAVE_DATA_PATH + "soph_test_tagged.txt", 'w') as f:
f.writelines(sophtest)
with open(SAVE_DATA_PATH + "soph_train_tagged.txt", 'w') as f:
f.writelines(sophstrain)
allnaive = [x for x in split_aes if len(x) > 20]
naiveshort = []
for sent in allnaive:
words = len(sent.split(' '))
if words <= 30:
naiveshort.append(sent)
len(naiveshort)
naivetrain, naivetest = splitData(naiveshort, keepnum)
print(len(naivetest))
print(len(naivetrain))
with open(SAVE_DATA_PATH + "naive_test_tagged.txt", 'w') as f:
f.writelines(naivetest)
with open(SAVE_DATA_PATH + "naive_train_tagged.txt", 'w') as f:
f.writelines(naivetrain)
def removePunc(sent):
punct = string.punctuation.replace('<', '').replace('>', '')
sent = re.sub('['+punct+']', '', sent)
sent = ' '.join(sent.split())
return(sent + '\n')
tagged_nopunct = list(map(removePunc, tagged))
split_aes_nopunct = list(map(removePunc, split_aes))
short_soph_tagged = []
for sent in tagged_nopunct:
words = len(sent.split(' '))
if words <= 30:
short_soph_tagged.append(sent)
sophstrain, sophtest = splitData(short_soph_tagged, keepnum)
print(len(sophtest))
print(len(sophstrain))
with open(SAVE_DATA_PATH + "soph_test_tagged_nopunct.txt", 'w') as f:
f.writelines(sophtest)
with open(SAVE_DATA_PATH + "soph_train_tagged_nopunct.txt", 'w') as f:
f.writelines(sophstrain)
allnaive = [x for x in split_aes_nopunct if len(x) > 20]
naiveshort_tag_np = []
for sent in allnaive:
words = len(sent.split(' '))
if words <= 30:
naiveshort_tag_np.append(sent)
naivetrain, naivetest = splitData(naiveshort_tag_np, keepnum)
with open(SAVE_DATA_PATH + "naive_test_tagged_nopunct.txt", 'w') as f:
f.writelines(naivetest)
with open(SAVE_DATA_PATH + "naive_train_tagged_nopunct.txt", 'w') as f:
f.writelines(naivetrain)
data = []
for item in text:
data.append(item.get_text().split('\n'))
paginated_links = "https://www.mykidsway.com/essays/page/{}/"
all_essays = req = requests.get("https://www.mykidsway.com/essays/")
essay_html = BeautifulSoup(all_essays.content, 'html.parser')
divs = essay_html.find_all("div", class_="hovereffect")
all_links = []
for content in divs:
all_links.append(content.find("a").get("href"))
for i in range(1,20):
new_page = paginated_links.format(str(i))
all_essays = req = requests.get(new_page)
essay_html = BeautifulSoup(all_essays.content, 'html.parser')
divs = essay_html.find_all("div", class_="hovereffect")
for content in divs:
all_links.append(content.find("a").get("href"))
sleep(1)
def getText(link):
req = requests.get(link)
soup = BeautifulSoup(req.content, 'html.parser')
text = soup.find_all("span", itemprop="description")
data = []
for item in text:
split_text = item.get_text().split('\n')
total_len = sum([len(x) for x in split_text])
if total_len > 2000:
print("skipping ", link)
continue
for sentence in split_text:
data.append(sentence)
return data
all_sentences = []
for link in set(all_links):
print(link)
data_list = getText(link)
for sentence in data_list:
all_sentences.append(sentence)
sleep(1)
def cleanKMW(data_list):
newList = []
for sent in data_list:
if len(sent) < 40:
continue
if '^' in sent:
continue
if bool(re.search(r'\d', sent)):
continue
sent = sent.lower()
newList.append(sent + '\n')
return newList
cleanedKMW = cleanKMW(all_sentences)
reordered = []
for sent in cleanedKMW:
split_sent = sent_tokenize(sent.strip())
for sentence in split_sent:
if (len(word_tokenize(sentence)) > 20) or (len(word_tokenize(sentence)) < 4):
continue
reordered.append(sentence + '\n')
with open(SAVE_DATA_PATH + "KMW_essays.txt", 'w') as f:
f.writelines(reordered)
def limitLength(dataList, maxlen):
keepshort = []
for sent in dataList:
words = len(word_tokenize(sent))
if words <= maxlen:
keepshort.append(sent)
return keepshort
def writeOut(data, fileroot, maxlen):
    keepshort = limitLength(data, maxlen)
train, test = splitData(keepshort, keepnum)
with open(SAVE_DATA_PATH + f"test_{fileroot}.txt", 'w') as f:
f.writelines(test)
with open(SAVE_DATA_PATH + f"train_{fileroot}.txt", 'w') as f:
f.writelines(train)
with open(SAVE_DATA_PATH + "KMW_essays.txt", 'r') as f:
kmw = f.readlines()
with open(SAVE_DATA_PATH + "aes.txt", 'r') as f:
split_aes = f.readlines()
allnaive = kmw + split_aes[0:50000]
writeOut(allnaive, "naive_3", 35)
| 0.222025 | 0.374448 |
<a href="https://colab.research.google.com/github/JSJeong-me/KOSA-Pytorch/blob/main/CLIP_Zero_Shot_Image_Classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# How to use CLIP Zero-Shot on your own classification dataset
This notebook provides an example of how to benchmark CLIP's zero shot classification performance on your own classification dataset.
[CLIP](https://openai.com/blog/clip/) is a new zero-shot image classifier released by OpenAI that has been trained on 400 million text/image pairs from across the web. CLIP uses this training to make predictions over a flexible set of possible classification categories.
CLIP is zero-shot, which means **no training is required**.
Try it out on your own task here!
Be sure to experiment with various text prompts to unlock the richness of CLIP's pretraining procedure.
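The core zero-shot pattern is only a few lines; here is a condensed sketch of the same calls used in the inference section below (the image path and prompt strings are placeholders):
```
import torch
import clip
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
model, transform = clip.load("ViT-B/32", device=device)

prompts = ["a photo of a daisy", "a photo of a dandelion"]            # placeholder prompts
text = clip.tokenize(prompts).to(device)
image = transform(Image.open("example.jpg")).unsqueeze(0).to(device)  # placeholder image path

with torch.no_grad():
    logits_per_image, logits_per_text = model(image, text)   # image-text similarity scores
    probs = logits_per_image.softmax(dim=-1).cpu().numpy()   # one probability per prompt

print(dict(zip(prompts, probs[0])))
```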
# Download and Install CLIP Dependencies
```
#installing some dependencies; CLIP was released in PyTorch
import subprocess
CUDA_version = [s for s in subprocess.check_output(["nvcc", "--version"]).decode("UTF-8").split(", ") if s.startswith("release")][0].split(" ")[-1]
print("CUDA version:", CUDA_version)
if CUDA_version == "10.0":
torch_version_suffix = "+cu100"
elif CUDA_version == "10.1":
torch_version_suffix = "+cu101"
elif CUDA_version == "10.2":
torch_version_suffix = ""
else:
torch_version_suffix = "+cu110"
!pip install torch==1.7.1{torch_version_suffix} torchvision==0.8.2{torch_version_suffix} -f https://download.pytorch.org/whl/torch_stable.html ftfy regex
import numpy as np
import torch
import os
print("Torch version:", torch.__version__)
os.kill(os.getpid(), 9)
#Your notebook process will restart after these installs
#clone the CLIP repository
!git clone https://github.com/openai/CLIP.git
%cd CLIP
```
# Download Classification Data or Object Detection Data
We will download the [public flowers classification dataset](https://public.roboflow.com/classification/flowers_classification) from Roboflow. The data will come out as folders broken into train/valid/test splits and separate folders for each class label.
You can easily download your own dataset from Roboflow in this format, too.
We made a conversion from object detection to CLIP text prompts in Roboflow, too, if you want to try that out.
To get your data into Roboflow, follow the [Getting Started Guide](https://blog.roboflow.ai/getting-started-with-roboflow/).
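After unzipping, the cells below assume a layout roughly like this (the class folder names here are illustrative; the code reads whatever folders exist under `./test/`):
```
# test/
#   _tokenization.txt     # one caption per line, in the same order as the class folders
#   daisy/                # one folder per class, containing *.jpg images
#   dandelion/
# train/ and valid/ follow the same class-folder structure
```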
```
#download classification data
#replace with your link
!curl -L "https://public.roboflow.com/ds/iwFPJ4BJdO?key=M0lehxxyds" > roboflow.zip; unzip roboflow.zip; rm roboflow.zip
import os
#the classes and images we want to test are stored in folders in the test set
class_names = os.listdir('./test/')
class_names.remove('_tokenization.txt')
class_names
#we auto generate some example tokenizations in Roboflow but you should edit this file to try out your own prompts
#CLIP gets a lot better with the right prompting!
#be sure the tokenizations are in the same order as your class_names above!
%cat ./test/_tokenization.txt
#edit your prompts as you see fit here, be sure the classes are in the same order as above
%%writefile ./test/_tokenization.txt
An example picture from the flowers dataset depicting a daisy
An example picture from the flowers dataset depicting a dandelion
candidate_captions = []
with open('./test/_tokenization.txt') as f:
candidate_captions = f.read().splitlines()
```
# Run CLIP inference on your classification dataset
```
import torch
import clip
from PIL import Image
import glob
def argmax(iterable):
return max(enumerate(iterable), key=lambda x: x[1])[0]
device = "cuda" if torch.cuda.is_available() else "cpu"
model, transform = clip.load("ViT-B/32", device=device)
correct = []
#define our target classifications; you should experiment with these strings of text as you see fit, though make sure they are in the same order as your class names above
text = clip.tokenize(candidate_captions).to(device)
for cls in class_names:
class_correct = []
test_imgs = glob.glob('./test/' + cls + '/*.jpg')
for img in test_imgs:
#print(img)
image = transform(Image.open(img)).unsqueeze(0).to(device)
with torch.no_grad():
image_features = model.encode_image(image)
text_features = model.encode_text(text)
logits_per_image, logits_per_text = model(image, text)
probs = logits_per_image.softmax(dim=-1).cpu().numpy()
pred = class_names[argmax(list(probs)[0])]
#print(pred)
if pred == cls:
correct.append(1)
class_correct.append(1)
else:
correct.append(0)
class_correct.append(0)
print('accuracy on class ' + cls + ' is :' + str(sum(class_correct)/len(class_correct)))
print('accuracy on all is : ' + str(sum(correct)/len(correct)))
#Hope you enjoyed!
#As always, happy inferencing
#Roboflow
```
```
import numpy as np
from datetime import datetime, timedelta
%matplotlib notebook
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
data = np.genfromtxt("data.csv", delimiter=",", dtype=None, encoding="UTF-8")
class Voter:
    def __init__(self, candidates, dt, normal, ranks):
        self.candidates = candidates
self.dt = dt
self.normal = normal
self.voting_list = []
for i in range(1, len(ranks)+1):
try:
j = ranks.index(i)
self.voting_list += [candidates[j]]
except ValueError:
pass
def vote(self, rejects=[]):
for el in self.voting_list:
if el not in rejects:
return el
return None
def __getitem__(self, i):
return self.voting_list[i]
def __len__(self):
return len(self.voting_list)
class Candidate:
def __init__(self, i, name):
self.i = i
self.name = name
def s(self):
return self.name.split(" ")[0]
def __repr__(self):
return "Candidate({}, {})".format(self.i, self.name)
def __str__(self):
return self.name
def summary(candidates, votes):
for candidate in candidates:
print(candidate, votes.count(candidate))
def get_candidate(search):
for candidate in candidates:
if search in candidate.name:
return candidate
candidates = []
for i, el in enumerate(data[0][1:-1]):
candidates += [Candidate(i+1, el[51:-1])]
print(candidates)
none_cand = Candidate(0, None)
voters = []
for row in data[1:]:
dt = datetime.strptime(row[0], "%d/%m/%Y %H:%M:%S")
try:
normal = [x for x in candidates if x.name == row[-1]][0]
except IndexError:
normal = none_cand
ranks = [int(x) if x!="" else None for x in row[1:-1]]
voters += [Voter(candidates, dt, normal, ranks)]
[[y.name for y in x.voting_list] for x in voters][3]
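# Added note: the loop below is a sketch of instant-runoff (ranked-choice) counting.
# In each round every ballot counts for its highest-ranked candidate not yet in `rejects`;
# if nobody holds a strict majority (> 50%), the candidate with the fewest votes is
# rejected and the count is repeated.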
rejects = []
for k in range(15):
print("-"*25)
# print(rejects)
votes = [x.vote(rejects) for x in voters]
nvotes = [votes.count(candidate) for candidate in candidates]
total = len(votes)
for i in range(len(candidates)):
print(candidates[i], nvotes[i])
if np.amax(nvotes)/total > 0.5:
winner = candidates[np.argmax(nvotes)]
print("The winner is {} with {:.2f}% of the vote".format(winner, np.amax(nvotes)/total*100))
break
else:
reject = None
minv = np.inf
for i in range(len(nvotes)):
# print(nvotes[i], candidates[i])
if nvotes[i] < minv and candidates[i] not in rejects:
reject = candidates[i]
minv = nvotes[i]
rejects += [reject]
print("We reject {} with {:.2f}% of the vote".format(reject, minv/total*100))
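# Added note: the three lines below look at second choices. `b` keeps the ballots whose first
# choice matches "HOŁ"; the first summary counts those voters' second choices (skipping ballots
# with only one ranking), and the last summary counts everyone's current first choice.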
b = [v for v in voters if len(v) and v[0]==get_candidate("HOŁ")]
summary(candidates, [v[1] for v in b if len(v)>1])
summary(candidates, [v.vote() for v in voters])
# http://sankeymatic.com/build/
rejects = []
tab = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#000', '#9467bd', '#8c564b', '#000',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
tab = ['#1b1462', '#d62728', '#2ca02c', '#f9c013', '#000', '#9467bd', '#e377c2', '', '#ff7f0e', '#17becf', '#bcbd22']
# fmt = lambda x: str(x)[:4] + "."
fmt = lambda x: str(x).split(" ")[0]
for r in range(11):
if r:
for i in range(len(candidates)):
if nvotes[i] and candidates[i] not in rejects:
print("{}. {} [{}] {}. {} {}.1".format(r-1, fmt(candidates[i]), nvotes[i], r, fmt(candidates[i]), tab[i]))
for candidate in candidates:
if candidate not in rejects:
print(":{}. {} {}".format(r, fmt(candidate), tab[candidate.i-1]))
votes = [x.vote(rejects) for x in voters]
nvotes = [votes.count(candidate) for candidate in candidates]
total = len(votes)
if np.amax(nvotes)/total > 0.5:
print(":{}. NIKT #aeaeae".format(r))
pass
else:
print("{}. NIKT [{}] {}. NIKT".format(r, votes.count(None), r+1))
print(":{}. NIKT #aeaeae".format(r))
reject = None
minv = np.inf
for i in range(len(nvotes)):
if nvotes[i] < minv and candidates[i] not in rejects:
reject = candidates[i]
minv = nvotes[i]
voters_rejected = [x for x in voters if x.vote(rejects) == reject]
rejects += [reject]
rvotes = [x.vote(rejects) for x in voters_rejected]
rnvotes = [rvotes.count(candidate) for candidate in candidates]
for i in range(len(candidates)):
if rnvotes[i]:
print("{}. {} [{}] {}. {}".format(r, fmt(reject), rnvotes[i], r+1, fmt(candidates[i])))
noped = rvotes.count(None)
# print("noped", noped)
if noped:
print("{}. {} [{}] {}. NIKT".format(r, fmt(reject), noped, r+1))
```
# Pie chart
```
candidates
fig, ax = plt.subplots()
tab2 = ['#1b1462', '#d62728', '#2ca02c', '#f9c013', '#9467bd', '#e377c2', '#ff7f0e', '#17becf', '#bcbd22', "#aeaeae"]
normal = [x.normal for x in voters]
ax.pie([normal.count(x) for x in candidates+[none_cand] if normal.count(x)], labels=[x.name.split(" ")[0] for x in candidates if normal.count(x)]+["NIKT"],
autopct='%1.1f%%', colors=tab2, pctdistance=0.8, textprops={"backgroundcolor":"#ffffff50"})
ax.axis('equal')
plt.show()
fig, ax = plt.subplots()
tab2 = ['#1b1462', '#d62728', '#2ca02c', '#f9c013', '#9467bd', '#e377c2', '#ff7f0e', '#17becf', '#bcbd22', "#aeaeae"]
normal = [x.vote() for x in voters]
ax.pie([normal.count(x) for x in candidates+[None] if normal.count(x)], labels=[x.name.split(" ")[0] for x in candidates if normal.count(x)]+["NIKT"],
autopct='%1.1f%%', colors=tab2, pctdistance=0.8, textprops={"backgroundcolor":"#ffffff50"})
ax.axis('equal')
plt.show()
fig, ax = plt.subplots()
tab2 = ['#1b1462', '#d62728', '#2ca02c', '#f9c013', '#9467bd', '#e377c2', '#ff7f0e', '#17becf', '#bcbd22', "#aeaeae"]
rejects = [get_candidate("BOS")]
normal = [x.vote(rejects) for x in voters if x.vote() in rejects]
ax.pie([normal.count(x) for x in candidates+[None] if normal.count(x)], labels=[x.name.split(" ")[0] for x in candidates if normal.count(x)] + ["NIKT"],
autopct='%1.1f%%', colors=tab2, pctdistance=0.8, textprops={"backgroundcolor":"#ffffff50"})
ax.axis('equal')
plt.show()
```
# Time histogram?
```
fig, ax = plt.subplots()
# ax.hist(mdates.date2num([x.dt for x in voters]), bins=50)
start = datetime(2020,6,26,19)
ax.hist(mdates.date2num([x.dt for x in voters]), bins=mdates.date2num([start+i*timedelta(hours=1) for i in range(51)]))
ax.xaxis.set_major_locator(mdates.DayLocator())
ax.xaxis.set_minor_locator(mdates.HourLocator(byhour=range(0,24,3)))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d.%m.%Y'))
ax.xaxis.set_minor_formatter(mdates.DateFormatter('%H'))
ax.grid(which='minor', ls="--")
ax.grid(which='major', axis='x', ls="-", c='black')
ax.grid(which='major', axis='y')
ax.tick_params(which="minor", top=True, labeltop=True, labelbottom=False)
plt.show()
```
```
# LDA
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('LDA/Wine.csv')
X = dataset.iloc[:, 0:13].values
y = dataset.iloc[:, 13].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Applying LDA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
lda = LDA(n_components = 2)
X_train = lda.fit_transform(X_train, y_train)
X_test = lda.transform(X_test)
# Fitting Logistic Regression to the Training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
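# Added check: summarize the fit with the confusion matrix and overall accuracy
# (uses the standard sklearn.metrics API)
from sklearn.metrics import accuracy_score
print(cm)
print('Test set accuracy: %.3f' % accuracy_score(y_test, y_pred))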
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green', 'blue'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('LD1')
plt.ylabel('LD2')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green', 'blue'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('LD1')
plt.ylabel('LD2')
plt.legend()
plt.show()
```
## 1. The dataset
<p>Walt Disney Studios is the foundation on which The Walt Disney Company was built. The Studios has produced more than 600 films since their debut film, Snow White and the Seven Dwarfs in 1937. While many of its films were big hits, some of them were not. In this notebook, we will explore a dataset of Disney movies and analyze what contributes to the success of Disney movies.</p>
<p><img src="https://assets.datacamp.com/production/project_740/img/jorge-martinez-instagram-jmartinezz9-431078-unsplash_edited.jpg" alt></p>
<p>First, we will take a look at the Disney data compiled by <a href="https://data.world/kgarrett/disney-character-success-00-16">Kelly Garrett</a>. The data contains 579 Disney movies with six features: movie title, release date, genre, MPAA rating, total gross, and inflation-adjusted gross. </p>
<p>Let's load the file and see what the data looks like.</p>
```
# Import pandas library
import pandas as pd
# Read the file into gross
gross = pd.read_csv("datasets/disney_movies_total_gross.csv",parse_dates=['release_date'])
# Print out gross
gross.head()
```
## 2. Top ten movies at the box office
<p>Let's start by exploring the data. We will check which 10 Disney movies have earned the most at the box office. We can do this by sorting movies by their inflation-adjusted gross (we will call it adjusted gross from this point onward). </p>
```
# Sort data by the adjusted gross in descending order
gross.sort_values(by="inflation_adjusted_gross",ascending=False,inplace=True)
# Display the top 10 movies
gross.head(10)
```
## 3. Movie genre trend
<p>From the top 10 movies above, it seems that some genres are more popular than others. So, we will check which genres are growing stronger in popularity. To do this, we will group movies by genre and then by year to see the adjusted gross of each genre in each year.</p>
```
# Extract year from release_date and store it in a new column
gross['release_year'] = pd.DatetimeIndex(gross["release_date"]).year
# Compute mean of adjusted gross per genre and per year
group = gross.groupby(['genre','release_year']).mean()
# Convert the GroupBy object to a DataFrame
genre_yearly = group.reset_index()
# Inspect genre_yearly
genre_yearly.head(10)
```
## 4. Visualize the genre popularity trend
<p>We will plot these group means to better see how box office revenues have changed over time.</p>
```
# Import seaborn library
import seaborn as sns
# Plot the data
sns.relplot(kind='line',data=genre_yearly,x='release_year',
y='inflation_adjusted_gross',
hue='genre')
```
## 5. Data transformation
<p>The line plot supports our belief that some genres are growing faster in popularity than others. For Disney movies, Action and Adventure genres are growing the fastest. Next, we will build a linear regression model to understand the relationship between genre and box office gross. </p>
<p>Since linear regression requires numerical variables and the genre variable is a categorical variable, we'll use a technique called one-hot encoding to convert the categorical variables to numerical. This technique transforms each category value into a new column and assigns a 1 or 0 to the column. </p>
<p>For this dataset, there will be 11 dummy variables, one for each genre except the action genre which we will use as a baseline. For example, if a movie is an adventure movie, like The Lion King, the adventure variable will be 1 and other dummy variables will be 0. Since the action genre is our baseline, if a movie is an action movie, such as The Avengers, all dummy variables will be 0.</p>
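<p>As a tiny added illustration (the mini genre list below is made up, not the real data), this is what <code>get_dummies</code> with a dropped baseline produces:</p>
```
# Made-up mini example of one-hot encoding with a baseline category
import pandas as pd
mini_genres = pd.Series(['Action', 'Adventure', 'Comedy', 'Action'])
# drop_first=True drops the alphabetically first category (Action), which becomes the baseline
pd.get_dummies(mini_genres, drop_first=True)
```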
```
# Convert genre variable to dummy variables
genre_dummies = pd.get_dummies(gross['genre'],drop_first=True)
# Inspect genre_dummies
genre_dummies.head()
```
## 6. The genre effect
<p>Now that we have dummy variables, we can build a linear regression model to predict the adjusted gross using these dummy variables.</p>
<p>From the regression model, we can check the effect of each genre by looking at its coefficient, given in units of box office gross dollars. We will focus on the impact of the action and adventure genres here. (Note that the intercept and the first coefficient value represent the effects of the action and adventure genres, respectively.) We expect that movies like The Lion King or Star Wars would perform better at the box office.</p>
```
# Import LinearRegression
from sklearn.linear_model import LinearRegression
# Build a linear regression model
regr = LinearRegression()
# Fit regr to the dataset
regr.fit(genre_dummies,gross.inflation_adjusted_gross)
# Get estimated intercept and coefficient values
action = regr.intercept_
adventure = regr.coef_[[0]][0]
# Inspect the estimated intercept and coefficient values
print((action, adventure))
```
## 7. Confidence intervals for regression parameters (i)
<p>Next, we will compute 95% confidence intervals for the intercept and coefficients. The 95% confidence intervals for the intercept <b><i>a</i></b> and coefficient <b><i>b<sub>i</sub></i></b> means that the intervals have a probability of 95% to contain the true value <b><i>a</i></b> and coefficient <b><i>b<sub>i</sub></i></b> respectively. If there is a significant relationship between a given genre and the adjusted gross, the confidence interval of its coefficient should exclude 0. </p>
<p>We will calculate the confidence intervals using the pairs bootstrap method. </p>
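<p>Before applying it to the regression, here is a minimal added sketch of the pairs bootstrap idea on toy data (all names and numbers below are illustrative): resample (x, y) pairs with replacement, recompute the statistic, and take percentiles of the replicates.</p>
```
# Minimal pairs-bootstrap sketch on toy data (illustrative only)
import numpy as np
np.random.seed(1)
x_toy = np.random.normal(size=100)
y_toy = 2 * x_toy + np.random.normal(size=100)
inds_toy = np.arange(len(x_toy))
bs_slopes = np.empty(1000)
for i in range(1000):
    bs_inds = np.random.choice(inds_toy, size=len(inds_toy))        # resample pairs together
    bs_slopes[i] = np.polyfit(x_toy[bs_inds], y_toy[bs_inds], 1)[0] # slope of a degree-1 fit
print(np.percentile(bs_slopes, [2.5, 97.5]))                        # 95% confidence interval
```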
```
# Import a module
import numpy as np
# Create an array of indices to sample from
inds = np.arange(0,len(gross['genre']))
# Initialize 500 replicate arrays
size = 500
bs_action_reps = np.empty(size)
bs_adventure_reps = np.empty(size)
```
## 8. Confidence intervals for regression parameters (ii)
<p>After the initialization, we will perform pairs bootstrap estimates for the regression parameters. Note that we will draw a sample from a set of (genre, adjusted gross) pairs where the genre is the original genre variable. We will perform one-hot encoding after that. </p>
```
# Generate replicates
for i in range(size):
# Resample the indices
bs_inds = np.random.choice(inds,size=len(inds))
# Get the sampled genre and sampled adjusted gross
bs_genre = gross['genre'][bs_inds]
bs_gross = gross['inflation_adjusted_gross'][bs_inds]
# Convert sampled genre to dummy variables
bs_dummies = pd.get_dummies(bs_genre,drop_first=True)
# Build and fit a regression model
regr = LinearRegression().fit(bs_dummies, bs_gross)
# Compute replicates of estimated intercept and coefficient
bs_action_reps[i] = regr.intercept_
bs_adventure_reps[i] = regr.coef_[[0]][0]
```
## 9. Confidence intervals for regression parameters (iii)
<p>Finally, we compute 95% confidence intervals for the intercept and coefficient and examine if they exclude 0. If one of them (or both) does, then it is unlikely that the value is 0 and we can conclude that there is a significant relationship between that genre and the adjusted gross. </p>
```
# Compute 95% confidence intervals for intercept and coefficient values
confidence_interval_action = np.percentile(bs_action_reps,[2.5, 97.5])
confidence_interval_adventure = np.percentile(bs_adventure_reps,[2.5, 97.5])
# Inspect the confidence intervals
print(confidence_interval_action)
print(confidence_interval_adventure)
```
## 10. Should Disney make more action and adventure movies?
<p>The bootstrap confidence intervals for the intercept and coefficient do not contain zero: the lower and upper bounds of both intervals are positive. This tells us that the adjusted gross is likely to be significantly correlated with the action and adventure genres. </p>
<p>From the bootstrap analysis and the trend plot we made earlier, we can say that, according to our data, Disney movies in the action and adventure genres tend to do better in terms of adjusted gross than movies in other genres. So we could expect more Marvel, Star Wars, and live-action movies in the upcoming years!</p>
```
# should Disney studios make more action and adventure movies?
more_action_adventure_movies = True  # based on the conclusion above
```
```
import os
from google.colab import drive
drive.mount('/drive')
os.symlink('/drive/My Drive', '/content/drive')
!ls -l /content/drive/
!apt-get install -y -qq software-properties-common python-software-properties module-init-tools
!add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null
!apt-get update -qq 2>&1 > /dev/null
!apt-get -y install -qq google-drive-ocamlfuse fuse
from google.colab import auth
auth.authenticate_user()
from oauth2client.client import GoogleCredentials
creds = GoogleCredentials.get_application_default()
import getpass
!google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL
vcode = getpass.getpass()
!echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret}
import datetime
import json
import os
import pprint
import random
import string
import sys
import tensorflow as tf
# memory footprint support libraries/code
!ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
!pip install gputil
!pip install psutil
!pip install humanize
import psutil
import humanize
import os
import GPUtil as GPU
GPUs = GPU.getGPUs()
# XXX: only one GPU on Colab and isn’t guaranteed
gpu = GPUs[0]
def printm():
process = psutil.Process(os.getpid())
print("Gen RAM Free: " + humanize.naturalsize( psutil.virtual_memory().available ), " | Proc size: " + humanize.naturalsize( process.memory_info().rss))
print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal))
printm()
import numpy as np
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
from keras.layers.advanced_activations import LeakyReLU
from keras.datasets import cifar10
(train_features, train_labels), (test_features, test_labels) = cifar10.load_data()
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(2, 2), padding="same", activation="relu", input_shape=(train_features.shape[1:])))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Conv2D(filters=64, kernel_size=(4, 4), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Flatten())
model.add(Dense(25600, activation="relu"))
model.add(Dense(25600, activation="relu"))
model.add(Dense(25600, activation="relu"))
model.add(Dense(25600, activation="relu"))
model.add(Dense(10, activation="softmax"))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_features, train_labels, validation_split=0.2, epochs=10, batch_size=128, verbose=1)
!ps ax | grep python
!nvidia-smi
!kill -9 -1
# Load the Drive helper and mount
from google.colab import drive
# This will prompt for authorization.
drive.mount('/content/drive')
import sys
!test -d SRGAN || git clone https://github.com/leftthomas/SRGAN
if not 'SRGAN' in sys.path:
sys.path += ['SRGAN']
%cd SRGAN/
!pwd
import sys
!test -d SRGAN_Test || git clone https://github.com/goldenbili/SRGAN_Test.git
if not 'SRGAN_Test' in sys.path:
sys.path += ['SRGAN_Test']
%cd SRGAN_Test/
!pwd
!wget http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip
!wget http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_valid_HR.zip
!unzip -q DIV2K_train_HR.zip -d data
!unzip -q DIV2K_valid_HR.zip -d data
!rm DIV2K_train_HR.zip
!rm DIV2K_valid_HR.zip
```
Temporary step - copy the downloaded training data to Google Drive
```
!cp -r data/DIV2K_train_HR /content/drive/MyDrive/SRGAN/data/DIV2K_train_HR
!cp -r data/DIV2K_valid_HR /content/drive/MyDrive/SRGAN/data/DIV2K_valid_HR
```
Constant settings
```
Path_drive_Base = "/content/drive/SRGAN/"
Path_data_Base = "/content/drive/train_image/"
Path_dataInput_Base = "/content/SRGAN_Test/data/"
# Path_data_Base = "/content/SRGAN_Test/data/"
Path_snapshot = Path_drive_Base + "snapshots/"
Gan_model = 'netG_epoch_8.pth'
Dis_model = 'netD_epoch_8.pth'
!mkdir data
Sub_Train = "02**" #@param {type:"string"}
Sub_Valid = "080*" #@param {type:"string"}
Source_Train = Path_data_Base + "DIV2K_train_HR/" + Sub_Train + ".png"
Source_Valid = Path_data_Base + "DIV2K_valid_HR/" + Sub_Valid + ".png"
Path_train = Path_dataInput_Base + "DIV2K_train_" + Sub_Train
Path_valid = Path_dataInput_Base + "DIV2K_valid_" + Sub_Valid
!mkdir $Path_train
!mkdir $Path_valid
Path_train = Path_train + "/"
Path_valid = Path_valid + "/"
# Init Date time
from datetime import datetime
now = datetime.now()
Time_Info = datetime.strftime(now,'%Y-%m-%d_%H:%M:%S')
trainPath = Path_drive_Base + "training_results/Train_"+ Sub_Train + "_Valid_" + Sub_Valid
!mkdir $trainPath
trainPath = trainPath + '/' + datetime.strftime(now,'%Y-%m-%d_%H:%M:%S')
!mkdir $trainPath
trainPath = trainPath + '/'
statisticsPath = Path_drive_Base + "statistics/Train_"+ Sub_Train + "_Valid_" + Sub_Valid
!mkdir $statisticsPath
statisticsPath = statisticsPath + '/' + datetime.strftime(now,'%Y-%m-%d_%H:%M:%S')
!mkdir $statisticsPath
statisticsPath = statisticsPath + '/'
!cp $Source_Train $Path_train
!cp $Source_Valid $Path_valid
!cp ../drive/My\ Drive/SRGAN/DIV2K_train_HR.zip ./data/
!cp ../drive/My\ Drive/SRGAN/DIV2K_valid_HR.zip ./data/
!unzip -q ./data/DIV2K_train_HR.zip -d data
!unzip -q ./data/DIV2K_valid_HR.zip -d data
!rm ./data/DIV2K_train_HR.zip
!rm ./data/DIV2K_valid_HR.zip
!python -c "import torch; print(torch.__version__)"
!python -c "import torchvision; print(torchvision.__version__)"
!pip install torchvision==0.7
!pip install torch==1.6
# Note: cudatoolkit is a conda package, not a pip package; the CUDA runtime is already provided on Colab
! python train.py \
--num_epochs=5 \
--use_cuda=1 \
--batch_size=1 \
--snapshots_folder=$Path_snapshot \
--snapshots_Gan=$Gan_model \
--snapshots_Dis=$Dis_model \
--train_path=$Path_train \
--valid_path=$Path_valid \
--statistics_path=$statisticsPath/ \
--epochs_path=$trainPath/ \
--willy_test=0 \
--do_resize=1
print('$Path_snapshot:')
print(Path_snapshot)
print('$Gan_model')
print(Gan_model)
print('$Dis_model')
print(Dis_model)
print('$Path_train')
print(Path_train)
print('$Path_valid')
print(Path_valid)
print('$statisticsPath:')
print(statisticsPath)
print('$trainPath:')
print(trainPath)
! python train.py \
--num_epochs 5 \
--use_cuda 1 \
--batch_size 1 \
--snapshots_folder $Path_snapshot \
--snapshots_Gan $Gan_model \
--snapshots_Dis $Dis_model \
--train_path $Path_train \
--valid_path $Path_valid \
--statistics_path $statisticsPath/ \
--epochs_path $trainPath/ \
--willy_test 0 \
--do_resize 1
!cp -R ./epochs/ ../drive/My\ Drive/
!cp -R ./training_results ../drive/My\ Drive/
!cp -R ./training_results ../drive/My\ Drive/
!rm *.bmp
```
# Working with data from the HPF Spectrograph
by Michael Gully-Santiago & Jessica Luna
`muler` has new *beta support* for the [Habitable Zone Planet Finder Spectrograph](https://hpf.psu.edu/) (HPF). In this tutorial we show how to read, process, and plot HPF data. We currently support data from either the Goldilocks pipeline or the HPF Instrument Team pipeline.
```
from muler.hpf import HPFSpectrum, HPFSpectrumList
import numpy as np
import glob
%config InlineBackend.figure_format='retina'
```
In order to use `muler` with HPF data, you need to have some spectra on your computer. Here we have Goldilocks spectra:
```
local_files = glob.glob("../../tests/data/Goldilocks_*.spectra.fits")
file = local_files[0]
```
We can easily read in HPF data for a specific spectral order:
```
original_spectrum = HPFSpectrum(file=file, order=15)
```
The spectrum has physical units:
```
original_spectrum.wavelength # "Angstroms"
original_spectrum.flux.unit # "counts"
```
We can normalize the spectrum, which divides the spectrum by the median value, rendering the flux units *dimensionless*
```
spectrum = original_spectrum.normalize()
np.nanmedian(spectrum.flux)
```
We can effortlessly subtract the sky emission from the target fiber.
```
sky_free_spectrum = spectrum.sky_subtract()
```
Now we can normalize and overplot the observed spectrum, the sky-subtracted spectrum, and the sky emission itself:
```
ax = spectrum.plot(label='Observed spectrum', color='k')
spectrum.sky.plot(ax=ax, label='Sky spectrum')
sky_free_spectrum.plot(ax=ax, label='Observed - Sky', lw=0.5)
ax.legend(ncol=3); ax.set_ylim(0.0, 1.5);
```
Nice! We have a sky subtracted spectrum! Let's remove the instrumental response function from this sky subtracted spectrum. The instrumental response is dominated by the characteristic concave-down shape. This conspicuous parabola-like curve stems from an optical device called an [echelle grating](https://en.wikipedia.org/wiki/Echelle_grating) and its related "blaze function". Accordingly the process of removing this shape is sometimes referred to by the names "de-blazing", "flattening", or "normalizing". In this tutorial we will stick with the term "deblaze".
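To build intuition for what spline deblazing does, here is a small added sketch on synthetic data; it does not use `muler`, and every number in it is made up. We fit a smooth spline to a fake blaze-shaped spectrum and divide it out, leaving only the narrow line.
```
# Standalone sketch of spline "deblazing" on synthetic data (not the muler implementation)
import numpy as np
from scipy.interpolate import UnivariateSpline

wave = np.linspace(8600, 8700, 500)                           # fake wavelength grid (Angstroms)
blaze = 1.0 - ((wave - 8650) / 60.0) ** 2                     # concave-down blaze-like shape
line = 1.0 - 0.3 * np.exp(-0.5 * ((wave - 8640) / 0.3) ** 2)  # one narrow absorption line
flux = blaze * line

spline = UnivariateSpline(wave, flux, s=1.0)                  # smooth fit follows the blaze, not the line
deblazed = flux / spline(wave)                                # dividing flattens the continuum to ~1
```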
We first want to remove the `NaN` values at the edges, then apply the spline division:
```
deblazed_spectrum = sky_free_spectrum.remove_nans().blaze_divide_spline()
ax = deblazed_spectrum.normalize().plot(label='Deblazed by spline')
ax.axhline(1.0, linestyle='dashed', color='k')
ax.set_ylim(0.5, 1.2); ax.legend();
```
Great! We have achieved our goal: a sky-subtracted, deblazed target spectrum that is ready for analysis.
You can see that the spline division is not perfect because some broad line wings can be mistaken as part of the blaze shape. `muler` has experimental support for a different type of deblazing based on high-fidelity [flat-field](https://en.wikipedia.org/wiki/Flat-field_correction) spectra. Those experimental techniques currently require ancillary calibration files that are not provided with our git repo. Check back in for future updates!
```
%pylab inline
import glob
from tqdm import tqdm
from skimage.io import imread
from skimage.filters import try_all_threshold
from skimage.color import rgb2gray
from pyvirchow.morphology.operations import open_close
from pyvirchow.morphology.operations import close_open
from skimage import data, color, img_as_ubyte
from skimage.feature import canny
from skimage.transform import hough_ellipse
from skimage.draw import ellipse_perimeter
from skimage.filters import roberts, sobel, scharr, prewitt
from skimage.morphology import erosion, dilation, opening, closing, white_tophat
from skimage.morphology import black_tophat, skeletonize, convex_hull_image
from skimage.morphology import disk
normal_patches_dir = '../normal_patches_test/level_0/'
tumor_patches_dir = '../tumor_patches_test/level_0/'
np.random.seed(42)
master_matrix = []
label_matrix = []
y = []
list_of_tumor_files = list(glob.glob('{}*.png'.format(tumor_patches_dir)))
list_of_tumor_files = np.random.choice(list_of_tumor_files, 5000)
for f in tqdm(list_of_tumor_files):
master_matrix.append(imread(f))
label_matrix.append('tumor')
y.append(1)
tumor_count = len(label_matrix)
list_of_normal_files = list(glob.glob('{}*.png'.format(normal_patches_dir)))
list_of_normal_files = np.random.choice(list_of_normal_files, 5000)
#np.random.shuffle(list_of_normal_files)
for f in tqdm(list_of_normal_files):
master_matrix.append(imread(f))
label_matrix.append('normal')
y.append(0)
master_matrix = np.array(master_matrix)
y=np.array(y)
fig, ax = try_all_threshold(rgb2gray(master_matrix[0]), figsize=(10, 8), verbose=False)
fig, ax = try_all_threshold(rgb2gray(master_matrix[-1]), figsize=(10, 8), verbose=False)
fig, ax = try_all_threshold(close_open(rgb2gray(master_matrix[1]), open_kernel_size=5, close_kernel_size=5),
figsize=(10, 8), verbose=False)
y[-1]
image_rgb = master_matrix[0]
image_gray = rgb2gray(image_rgb)
edges = canny(image_gray, sigma=2.0,
low_threshold=0.35, high_threshold=0.8)
fig2, (ax1, ax2, ax3) = plt.subplots(ncols=3, nrows=1, figsize=(8, 4),
sharex=True, sharey=True)
ax1.set_title('Original picture')
ax1.imshow(image_rgb)
ax2.set_title('Canny')
ax2.imshow(edges, cmap='gray')
selem = disk(3)
ax3.set_title('Closing on canny')
ax3.imshow(closing(edges, selem=selem), cmap='gray')
# Perform a Hough Transform
# The accuracy corresponds to the bin size of a major axis.
# The value is chosen in order to get a single high accumulator.
# The threshold eliminates low accumulators
result = hough_ellipse(edges, accuracy=1, threshold=250,
min_size=2, max_size=5)
result.sort(order='accumulator')
# Estimated parameters for the ellipse
best = list(result[-1])
yc, xc, a, b = [int(round(x)) for x in best[1:5]]
orientation = best[5]
# Draw the ellipse on the original image
cy, cx = ellipse_perimeter(yc, xc, a, b, orientation)
image_rgb[cy, cx] = (0, 0, 255)
# Draw the edge (white) and the resulting ellipse (red)
edges = color.gray2rgb(img_as_ubyte(edges))
edges[cy, cx] = (250, 0, 0)
fig2, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(8, 4),
sharex=True, sharey=True)
ax1.set_title('Original picture')
ax1.imshow(image_rgb)
ax2.set_title('Edge (white) and result (red)')
ax2.imshow(edges)
!python ../utils/process_images.py --image_files \
"/Z/personal-folders/interns/saket/github/pyvirchow/normal_tumor_combined_level0/*/*.png" --novalidate_images \
--clusters 2 --layout fitsne --output_folder ../normal_tumour_combined_level0_output
```
# 10.12 Machine Translation
```
import collections
import os
import io
import math
import torch
from torch import nn
import torch.nn.functional as F
import torchtext.vocab as Vocab
import torch.utils.data as Data
import sys
sys.path.append("..")
import d2lzh_pytorch as d2l
PAD, BOS, EOS = '<pad>', '<bos>', '<eos>'
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(torch.__version__, device)
import inspect
print(inspect.getsource(d2l.evaluate_accuracy))
```
## 10.12.1 Reading and Preprocessing the Data
```
# Record all the tokens of a sequence in all_tokens so that a vocabulary can be built later, then pad
# the sequence with PAD until its length reaches max_seq_len, and finally store it in all_seqs
def process_one_seq(seq_tokens, all_tokens, all_seqs, max_seq_len):
all_tokens.extend(seq_tokens)
seq_tokens += [EOS] + [PAD] * (max_seq_len - len(seq_tokens) - 1)
all_seqs.append(seq_tokens)
# Build the vocabulary from all tokens, convert the tokens of every sequence to word indices, and construct a Tensor
def build_data(all_tokens, all_seqs):
vocab = Vocab.Vocab(collections.Counter(all_tokens),
specials=[PAD, BOS, EOS])
indices = [[vocab.stoi[w] for w in seq] for seq in all_seqs]
return vocab, torch.tensor(indices)
def read_data(max_seq_len):
    # in and out are short for input and output
in_tokens, out_tokens, in_seqs, out_seqs = [], [], [], []
with io.open('../../data/fr-en-small.txt') as f:
lines = f.readlines()
for line in lines:
in_seq, out_seq = line.rstrip().split('\t')
in_seq_tokens, out_seq_tokens = in_seq.split(' '), out_seq.split(' ')
if max(len(in_seq_tokens), len(out_seq_tokens)) > max_seq_len - 1:
            continue  # skip this example if it is longer than max_seq_len once EOS is appended
process_one_seq(in_seq_tokens, in_tokens, in_seqs, max_seq_len)
process_one_seq(out_seq_tokens, out_tokens, out_seqs, max_seq_len)
in_vocab, in_data = build_data(in_tokens, in_seqs)
out_vocab, out_data = build_data(out_tokens, out_seqs)
return in_vocab, out_vocab, Data.TensorDataset(in_data, out_data)
max_seq_len = 7
in_vocab, out_vocab, dataset = read_data(max_seq_len)
dataset[0]
```
## 10.12.2 Encoder-Decoder with Attention
### 10.12.2.1 Encoder
```
class Encoder(nn.Module):
def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
drop_prob=0, **kwargs):
super(Encoder, self).__init__(**kwargs)
self.embedding = nn.Embedding(vocab_size, embed_size)
self.rnn = nn.GRU(embed_size, num_hiddens, num_layers, dropout=drop_prob)
def forward(self, inputs, state):
        # Input shape is (batch size, number of time steps); swap the batch and time-step dimensions in the output
embedding = self.embedding(inputs.long()).permute(1, 0, 2) # (seq_len, batch, input_size)
return self.rnn(embedding, state)
def begin_state(self):
return None
encoder = Encoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2)
output, state = encoder(torch.zeros((4, 7)), encoder.begin_state())
output.shape, state.shape  # a GRU's state is h, while an LSTM's state is a tuple (h, c)
```
### 10.12.2.2 Attention Mechanism
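The cell below scores each encoder time step with a small MLP; this added summary simply restates, in equations, what `attention_model` and `attention_forward` compute. For decoder state $\boldsymbol{s}$ and encoder states $\boldsymbol{h}_{t'}$:

$$e_{t'} = \boldsymbol{v}^\top \tanh\big(\boldsymbol{W}\,[\boldsymbol{h}_{t'};\, \boldsymbol{s}]\big), \qquad \alpha_{t'} = \frac{\exp(e_{t'})}{\sum_{k}\exp(e_{k})}, \qquad \boldsymbol{c} = \sum_{t'} \alpha_{t'}\, \boldsymbol{h}_{t'},$$

where $\boldsymbol{c}$ is the context (background) variable returned by `attention_forward`.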
```
def attention_model(input_size, attention_size):
model = nn.Sequential(nn.Linear(input_size, attention_size, bias=False),
nn.Tanh(),
nn.Linear(attention_size, 1, bias=False))
return model
def attention_forward(model, enc_states, dec_state):
"""
    enc_states: (number of time steps, batch size, number of hidden units)
    dec_state: (batch size, number of hidden units)
"""
    # Broadcast the decoder hidden state to the same shape as the encoder hidden states, then concatenate
dec_states = dec_state.unsqueeze(dim=0).expand_as(enc_states)
enc_and_dec_states = torch.cat((enc_states, dec_states), dim=2)
    e = model(enc_and_dec_states)  # shape: (number of time steps, batch size, 1)
    alpha = F.softmax(e, dim=0)  # softmax over the time-step dimension
    return (alpha * enc_states).sum(dim=0)  # return the context (background) variable
seq_len, batch_size, num_hiddens = 10, 4, 8
model = attention_model(2*num_hiddens, 10)
enc_states = torch.zeros((seq_len, batch_size, num_hiddens))
dec_state = torch.zeros((batch_size, num_hiddens))
attention_forward(model, enc_states, dec_state).shape
```
### 10.12.2.3 Decoder with an Attention Mechanism
```
class Decoder(nn.Module):
def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
attention_size, drop_prob=0):
super(Decoder, self).__init__()
self.embedding = nn.Embedding(vocab_size, embed_size)
self.attention = attention_model(2*num_hiddens, attention_size)
        # The GRU's input is the concatenation of the attention context c and the actual input, so its size is num_hiddens + embed_size
self.rnn = nn.GRU(num_hiddens + embed_size, num_hiddens,
num_layers, dropout=drop_prob)
self.out = nn.Linear(num_hiddens, vocab_size)
def forward(self, cur_input, state, enc_states):
"""
cur_input shape: (batch, )
state shape: (num_layers, batch, num_hiddens)
"""
        # Compute the context vector using the attention mechanism
c = attention_forward(self.attention, enc_states, state[-1])
        # Concatenate the embedded input and the context vector along the feature dimension: (batch_size, num_hiddens + embed_size)
input_and_c = torch.cat((self.embedding(cur_input), c), dim=1)
        # Add a time-step dimension (of length 1) to the concatenated input and context vector
output, state = self.rnn(input_and_c.unsqueeze(0), state)
        # Remove the time-step dimension; the output shape is (batch_size, output vocab size)
output = self.out(output).squeeze(dim=0)
return output, state
def begin_state(self, enc_state):
        # Use the encoder's hidden state at the final time step as the decoder's initial hidden state
return enc_state
```
## 10.12.3 Training the Model
```
def batch_loss(encoder, decoder, X, Y, loss):
batch_size = X.shape[0]
enc_state = encoder.begin_state()
enc_outputs, enc_state = encoder(X, enc_state)
    # Initialize the decoder's hidden state
dec_state = decoder.begin_state(enc_state)
    # The decoder's input at the initial time step is BOS
dec_input = torch.tensor([out_vocab.stoi[BOS]] * batch_size)
    # Use the mask variable to ignore the loss on PAD labels; it is initialized to all ones
mask, num_not_pad_tokens = torch.ones(batch_size,), 0
l = torch.tensor([0.0])
for y in Y.permute(1,0): # Y shape: (batch, seq_len)
dec_output, dec_state = decoder(dec_input, dec_state, enc_outputs)
l = l + (mask * loss(dec_output, y)).sum()
        dec_input = y  # teacher forcing
num_not_pad_tokens += mask.sum().item()
        # Everything after EOS is PAD. The line below ensures that once EOS appears, mask stays 0 for the remaining steps
mask = mask * (y != out_vocab.stoi[EOS]).float()
return l / num_not_pad_tokens
def train(encoder, decoder, dataset, lr, batch_size, num_epochs):
enc_optimizer = torch.optim.Adam(encoder.parameters(), lr=lr)
dec_optimizer = torch.optim.Adam(decoder.parameters(), lr=lr)
loss = nn.CrossEntropyLoss(reduction='none')
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
for epoch in range(num_epochs):
l_sum = 0.0
for X, Y in data_iter:
enc_optimizer.zero_grad()
dec_optimizer.zero_grad()
l = batch_loss(encoder, decoder, X, Y, loss)
l.backward()
enc_optimizer.step()
dec_optimizer.step()
l_sum += l.item()
if (epoch + 1) % 10 == 0:
print("epoch %d, loss %.3f" % (epoch + 1, l_sum / len(data_iter)))
embed_size, num_hiddens, num_layers = 64, 64, 2
attention_size, drop_prob, lr, batch_size, num_epochs = 10, 0.5, 0.01, 2, 50
encoder = Encoder(len(in_vocab), embed_size, num_hiddens, num_layers,
drop_prob)
decoder = Decoder(len(out_vocab), embed_size, num_hiddens, num_layers,
attention_size, drop_prob)
train(encoder, decoder, dataset, lr, batch_size, num_epochs)
```
## 10.12.4 Predicting Variable-Length Sequences
```
def translate(encoder, decoder, input_seq, max_seq_len):
in_tokens = input_seq.split(' ')
in_tokens += [EOS] + [PAD] * (max_seq_len - len(in_tokens) - 1)
enc_input = torch.tensor([[in_vocab.stoi[tk] for tk in in_tokens]]) # batch=1
enc_state = encoder.begin_state()
enc_output, enc_state = encoder(enc_input, enc_state)
dec_input = torch.tensor([out_vocab.stoi[BOS]])
dec_state = decoder.begin_state(enc_state)
output_tokens = []
for _ in range(max_seq_len):
dec_output, dec_state = decoder(dec_input, dec_state, enc_output)
pred = dec_output.argmax(dim=1)
pred_token = out_vocab.itos[int(pred.item())]
        if pred_token == EOS:  # the output sequence is complete once EOS is predicted at any time step
break
else:
output_tokens.append(pred_token)
dec_input = pred
return output_tokens
input_seq = 'ils regardent .'
translate(encoder, decoder, input_seq, max_seq_len)
```
## 10.12.5 Evaluating the Translation Results
```
def bleu(pred_tokens, label_tokens, k):
len_pred, len_label = len(pred_tokens), len(label_tokens)
score = math.exp(min(0, 1 - len_label / len_pred))
for n in range(1, k + 1):
num_matches, label_subs = 0, collections.defaultdict(int)
for i in range(len_label - n + 1):
label_subs[''.join(label_tokens[i: i + n])] += 1
for i in range(len_pred - n + 1):
if label_subs[''.join(pred_tokens[i: i + n])] > 0:
num_matches += 1
label_subs[''.join(pred_tokens[i: i + n])] -= 1
score *= math.pow(num_matches / (len_pred - n + 1), math.pow(0.5, n))
return score
def score(input_seq, label_seq, k):
pred_tokens = translate(encoder, decoder, input_seq, max_seq_len)
label_tokens = label_seq.split(' ')
print('bleu %.3f, predict: %s' % (bleu(pred_tokens, label_tokens, k),
' '.join(pred_tokens)))
score('ils regardent .', 'they are watching .', k=2)
score('ils sont canadienne .', 'they are canadian .', k=2)
```
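As a quick sanity check on the `bleu` function above (using made-up token lists, not sentences from the dataset), a prediction identical to the label should score 1.0, while a partial match should score below 1.0:
```
# Illustrative check of the bleu function with hypothetical token lists
label = ['they', 'are', 'watching', '.']
print(bleu(label, label, k=2))                            # 1.0: every unigram and bigram matches
print(bleu(['they', 'are', 'russian', '.'], label, k=2))  # below 1.0: only some n-grams match
```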
## Deliverable 3. Create a Travel Itinerary Map.
```
# Dependencies and Setup
import pandas as pd
import requests
import gmaps
# Import API key
from config import gkey
# Configure gmaps
gmaps.configure(api_key=gkey)
# 1. Read the WeatherPy_vacation.csv into a DataFrame.
vacation_df = pd.read_csv("Data/WeatherPy_vacation.csv")
vacation_df.head()
# 2. Using the template add the city name, the country code, the weather description and maximum temperature for the city.
info_box_template = """
<dl>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
<dt>Weather Description</dt><dd>{Weather Description}</dd>
<dt>Max Temp</dt><dd>{Max Temp} °F</dd>
</dl>
"""
# 3a. Get the data from each row and add it to the formatting template and store the data in a list.
hotel_info = [info_box_template.format(**row) for index, row in vacation_df.iterrows()]
# 3b. Get the latitude and longitude from each row and store in a new DataFrame.
locations = vacation_df[["Lat", "Lng"]]
locations
# 4a. Add a marker layer for each city to the map.
marker_layer = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig = gmaps.figure(center=(30.0, 31.0), zoom_level=1.5)
fig.add_layer(marker_layer)
# 4b. Display the figure
fig
# From the map above pick 4 cities and create a vacation itinerary route to travel between the four cities.
# 5. Create DataFrames for each city by filtering the 'vacation_df' using the loc method.
# Hint: The starting and ending city should be the same city.
vacation_start = vacation_df.loc[vacation_df["City"]=="Batticaloa"]
vacation_end = vacation_df.loc[vacation_df["City"]=="Batticaloa"]
vacation_stop1 = vacation_df.loc[vacation_df["City"]=="Hambantota"]
vacation_stop2 = vacation_df.loc[vacation_df["City"]=="Matara"]
vacation_stop3 = vacation_df.loc[vacation_df["City"]=="Negombo"]
# 6. Get the latitude-longitude pairs as tuples from each city DataFrame using the to_numpy function and list indexing.
start = list(vacation_start[["Lat","Lng"]].to_numpy()[0])
end = list(vacation_end[["Lat","Lng"]].to_numpy()[0])
stop1 = list(vacation_stop1[["Lat","Lng"]].to_numpy()[0])
stop2 = list(vacation_stop2[["Lat","Lng"]].to_numpy()[0])
stop3 = list(vacation_stop3[["Lat","Lng"]].to_numpy()[0])
# 7. Create a direction layer map using the start and end latitude-longitude pairs,
# and stop1, stop2, and stop3 as the waypoints. The travel_mode should be "DRIVING", "BICYCLING", or "WALKING".
fig =gmaps.figure()
vacation_itinerary = gmaps.directions_layer(start,end,
waypoints=[stop1,stop2,stop3],
travel_mode="DRIVING")
fig.add_layer(vacation_itinerary)
fig
# 8. To create a marker layer map between the four cities.
# Combine the four city DataFrames into one DataFrame using the concat() function.
itinerary_df = pd.concat([vacation_start, vacation_stop1, vacation_stop2, vacation_stop3],ignore_index=True)
itinerary_df
# 9 Using the template add city name, the country code, the weather description and maximum temperature for the city.
info_box_template = """
<dl>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
<dt>Weather Description</dt><dd>{Weather Description}</dd>
<dt>Max Temp</dt><dd>{Max Temp} °F</dd>
</dl>
"""
# 10a Get the data from each row and add it to the formatting template and store the data in a list.
hotel_info = [info_box_template.format(**row) for index, row in itinerary_df.iterrows()]
# 10b. Get the latitude and longitude from each row and store in a new DataFrame.
locations= itinerary_df[["Lat", "Lng"]]
locations
# 11a. Add a marker layer for each city to the map.
marker_layer = gmaps.marker_layer(locations, info_box_content=hotel_info)
fig = gmaps.figure(center=(5.5, 85), zoom_level=2)
fig.add_layer(marker_layer)
# 11b. Display the figure
fig
```
# DBSCAN
This code template performs cluster analysis with the DBSCAN (Density-Based Spatial Clustering of Applications with Noise) algorithm and includes 2D and 3D visualizations of the resulting clusters.
### Required Packages
```
import operator
import warnings
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
import plotly.express as px
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import plotly.graph_objects as go
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import DBSCAN
warnings.filterwarnings("ignore")
```
### Initialization
Filepath of CSV file
```
file_path = "C:/Users/aish2/Documents/BlobCity/Datasets/Clustering/heart_failure_clinical_records_dataset.csv"
```
List of features which are required for model training
```
features=['age', 'anaemia', 'creatinine_phosphokinase', 'diabetes',
'ejection_fraction', 'high_blood_pressure', 'platelets',
'serum_creatinine', 'serum_sodium', 'sex', 'smoking', 'time']
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use pandas to read the CSV file from its storage path, and the `head` function to display the first few rows.
```
df=pd.read_csv(file_path)
df.head()
```
### Feature Selections
Feature selection is the process of reducing the number of input variables when developing a predictive model. It lowers the computational cost of modelling and, in some cases, improves the performance of the model.
We will assign all the required input features to X.
```
X = df[features]
```
### Data Preprocessing
Since most machine learning models in the scikit-learn library do not handle string categories or null values, we have to explicitly remove or replace them. The snippet below defines functions that fill any null values and encode string categories as indicator (dummy) columns.
```
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
```
Calling preprocessing functions on the feature and target set.
```
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
X.head()
```
### Model
The DBSCAN algorithm views clusters as areas of high density separated by areas of low density. Due to this rather generic view, clusters found by DBSCAN can be any shape, as opposed to k-means which assumes that clusters are convex shaped. The central component to the DBSCAN is the concept of core samples, which are samples that are in areas of high density.
A cluster is therefore a set of core samples, each close to each other and a set of non-core samples that are close to a core sample.
#### Tuning Parameters
> **eps**:The maximum distance between two samples for one to be considered as in the neighborhood of the other.
> **min_samples**:The number of samples (or total weight) in a neighborhood for a point to be considered as a core point.
> **metric**:The metric to use when calculating distance between instances in a feature array.
> **algorithm**:The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors.
> **leaf_size**:Leaf size passed to BallTree or cKDTree.
> **p**: The power of the Minkowski metric to be used to calculate distance between points.
[For more detail on API](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html)
```
y_pred = DBSCAN(eps=10,min_samples=5,n_jobs=-1).fit_predict(X)
```
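Since `eps` and `min_samples` largely determine how many clusters DBSCAN finds (and how many points it labels as noise, i.e. cluster `-1`), it can be worth sweeping a few values before settling on one. The cell below is an optional sketch; the `eps` values are arbitrary illustrations, not tuned recommendations.
```
# Optional: see how eps affects the number of clusters and noise points (illustrative values only)
for eps in [1, 5, 10, 50]:
    labels = DBSCAN(eps=eps, min_samples=5, n_jobs=-1).fit_predict(X)
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)  # exclude the noise label
    n_noise = (labels == -1).sum()
    print("eps=%s: %d clusters, %d noise points" % (eps, n_clusters, n_noise))
```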
### Cluster Analysis
First, we add the cluster labels from the trained model to a copy of the data frame for cluster analysis and visualization.
```
ClusterDF = X.copy()
ClusterDF['ClusterID'] = y_pred
ClusterDF.head()
```
#### Cluster Records
The bar chart below shows the number of data points in each cluster; DBSCAN assigns the label -1 to noise points that do not belong to any cluster.
```
ClusterDF['ClusterID'].value_counts().plot(kind='bar')
```
#### Cluster Plots
The functions below produce 2-dimensional and 3-dimensional cluster plots over combinations of the features in the dataset, coloring each point by its cluster label.
```
def Plot2DCluster(X_Cols,df):
for i in list(itertools.combinations(X_Cols, 2)):
plt.rcParams["figure.figsize"] = (8,6)
xi,yi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1])
for j in df['ClusterID'].unique():
DFC=df[df.ClusterID==j]
plt.scatter(DFC[i[0]],DFC[i[1]],cmap=plt.cm.Accent,label=j)
plt.xlabel(i[0])
plt.ylabel(i[1])
plt.legend()
plt.show()
def Plot3DCluster(X_Cols,df):
for i in list(itertools.combinations(X_Cols, 3)):
xi,yi,zi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1]),df.columns.get_loc(i[2])
fig,ax = plt.figure(figsize = (16, 10)),plt.axes(projection ="3d")
ax.grid(b = True, color ='grey',linestyle ='-.',linewidth = 0.3,alpha = 0.2)
for j in df['ClusterID'].unique():
DFC=df[df.ClusterID==j]
ax.scatter3D(DFC[i[0]],DFC[i[1]],DFC[i[2]],alpha = 0.8,cmap=plt.cm.Accent,label=j)
ax.set_xlabel(i[0])
ax.set_ylabel(i[1])
ax.set_zlabel(i[2])
plt.legend()
plt.show()
def Plotly3D(X_Cols,df):
for i in list(itertools.combinations(X_Cols,3)):
xi,yi,zi=df.columns.get_loc(i[0]),df.columns.get_loc(i[1]),df.columns.get_loc(i[2])
fig2=px.scatter_3d(df, x=i[0], y=i[1],z=i[2],color=df['ClusterID'])
fig2.show()
sns.set_style("whitegrid")
sns.set_context("talk")
plt.rcParams["lines.markeredgewidth"] = 1
sns.pairplot(data=ClusterDF, hue='ClusterID', palette='Dark2', height=5)
Plot2DCluster(X.columns,ClusterDF)
Plot3DCluster(X.columns,ClusterDF)
Plotly3D(X.columns,ClusterDF)
```
#### [Created by Thilakraj Devadiga](https://github.com/Thilakraj1998)
## Comparison of Categorical Variable Encodings
In this lecture, we will compare the performance of the different categorical encoding techniques we have learned so far.
We will compare:
- One hot encoding
- Replacing labels by the count
- Ordering labels according to target
- Mean Encoding
- WoE
Using the titanic dataset
```
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
# let's load the titanic dataset
# we will only use these columns in the demo
cols = ['pclass', 'age', 'sibsp', 'parch', 'fare',
'sex', 'cabin', 'embarked', 'survived']
data = pd.read_csv('../titanic.csv', usecols=cols)
data.head()
# let's check for missing data
data.isnull().sum()
# Drop observations with NA in Fare and embarked
data.dropna(subset=['fare', 'embarked'], inplace=True)
# Now we extract the first letter of the cabin
data['cabin'] = data['cabin'].astype(str).str[0]
data.head()
# drop observations with cabin = T, they are too few
data = data[data['cabin'] != 'T']
# Let's divide into train and test set
X_train, X_test, y_train, y_test = train_test_split(
data.drop(labels='survived', axis=1), # predictors
data['survived'], # target
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
# Let's replace null values in numerical variables by the mean
def impute_na(df, variable, value):
df[variable].fillna(value, inplace=True)
impute_na(X_test, 'age', X_train['age'].mean())
impute_na(X_train, 'age', X_train['age'].mean())
# note how I impute the test set first; this way the value of
# the mean used will be the same for both train and test
X_train.head()
# let's check that we have no missing data after NA imputation
X_train.isnull().sum(), X_test.isnull().sum()
```
### One Hot Encoding
```
def get_OHE(df):
df_OHE = pd.concat(
[df[['pclass', 'age', 'sibsp', 'parch', 'fare']],
pd.get_dummies(df[['sex', 'cabin', 'embarked']], drop_first=True)],
axis=1)
return df_OHE
X_train_OHE = get_OHE(X_train)
X_test_OHE = get_OHE(X_test)
X_train_OHE.head()
X_test_OHE.head()
```
### Count encoding
```
def categorical_to_counts(df_train, df_test):
# make a temporary copy of the original dataframes
df_train_temp = df_train.copy()
df_test_temp = df_test.copy()
for col in ['sex', 'cabin', 'embarked']:
# make dictionary mapping category to counts
counts_map = df_train_temp[col].value_counts().to_dict()
# remap the labels to their counts
df_train_temp[col] = df_train_temp[col].map(counts_map)
df_test_temp[col] = df_test_temp[col].map(counts_map)
return df_train_temp, df_test_temp
X_train_count, X_test_count = categorical_to_counts(X_train, X_test)
X_train_count.head()
```
### Ordered Integer Encoding
```
def categories_to_ordered(df_train, df_test, y_train, y_test):
# make a temporary copy of the datasets
df_train_temp = pd.concat([df_train, y_train], axis=1).copy()
df_test_temp = pd.concat([df_test, y_test], axis=1).copy()
for col in ['sex', 'cabin', 'embarked']:
# order categories according to target mean
ordered_labels = df_train_temp.groupby(
[col])['survived'].mean().sort_values().index
# create the dictionary to map the ordered labels to an ordinal number
ordinal_label = {k: i for i, k in enumerate(ordered_labels, 0)}
# remap the categories to these ordinal numbers
df_train_temp[col] = df_train[col].map(ordinal_label)
df_test_temp[col] = df_test[col].map(ordinal_label)
# remove the target
df_train_temp.drop(['survived'], axis=1, inplace=True)
df_test_temp.drop(['survived'], axis=1, inplace=True)
return df_train_temp, df_test_temp
X_train_ordered, X_test_ordered = categories_to_ordered(
X_train, X_test, y_train, y_test)
X_train_ordered.head()
```
### Mean Encoding
```
def categories_to_mean(df_train, df_test, y_train, y_test):
# make a temporary copy of the datasets
df_train_temp = pd.concat([df_train, y_train], axis=1).copy()
df_test_temp = pd.concat([df_test, y_test], axis=1).copy()
for col in ['sex', 'cabin', 'embarked']:
# calculate mean target per category
ordered_labels = df_train_temp.groupby(
[col])['survived'].mean().to_dict()
# remap the categories to target mean
df_train_temp[col] = df_train[col].map(ordered_labels)
df_test_temp[col] = df_test[col].map(ordered_labels)
# remove the target
df_train_temp.drop(['survived'], axis=1, inplace=True)
df_test_temp.drop(['survived'], axis=1, inplace=True)
return df_train_temp, df_test_temp
X_train_mean, X_test_mean = categories_to_mean(
X_train, X_test, y_train, y_test)
X_train_mean.head()
```
### Probability Ratio
```
def categories_to_ratio(df_train, df_test, y_train, y_test):
# make a temporary copy of the datasets
df_train_temp = pd.concat([df_train, y_train], axis=1).copy()
df_test_temp = pd.concat([df_test, y_test], axis=1).copy()
for col in ['sex', 'cabin', 'embarked']:
# create df containing the different parts of the WoE equation
# prob survived =1
prob_df = pd.DataFrame(df_train_temp.groupby([col])['survived'].mean())
# prob survived = 0
prob_df['died'] = 1-prob_df.survived
# calculate WoE
prob_df['Ratio'] = np.log(prob_df.survived/prob_df.died)
# capture woe in dictionary
woe = prob_df['Ratio'].to_dict()
# re-map the labels to WoE
df_train_temp[col] = df_train[col].map(woe)
df_test_temp[col] = df_test[col].map(woe)
# drop the target
df_train_temp.drop(['survived'], axis=1, inplace=True)
df_test_temp.drop(['survived'], axis=1, inplace=True)
return df_train_temp, df_test_temp
X_train_ratio, X_test_ratio = categories_to_ratio(X_train, X_test, y_train, y_test)
X_train_ratio.head()
```
### Random Forest Performance
```
# create a function to build random forests and compare performance in train and test set
def run_randomForests(X_train, X_test, y_train, y_test):
rf = RandomForestClassifier(n_estimators=50, random_state=39, max_depth=3)
rf.fit(X_train, y_train)
print('Train set')
pred = rf.predict_proba(X_train)
print(
'Random Forests roc-auc: {}'.format(roc_auc_score(y_train, pred[:, 1])))
print('Test set')
pred = rf.predict_proba(X_test)
print(
'Random Forests roc-auc: {}'.format(roc_auc_score(y_test, pred[:, 1])))
# OHE
run_randomForests(X_train_OHE, X_test_OHE, y_train, y_test)
# counts
run_randomForests(X_train_count, X_test_count, y_train, y_test)
# ordered labels
run_randomForests(X_train_ordered, X_test_ordered, y_train, y_test)
# mean encoding
run_randomForests(X_train_mean, X_test_mean, y_train, y_test)
# ratio
run_randomForests(X_train_ratio, X_test_ratio, y_train, y_test)
```
Comparing the roc-auc values on the test sets, we can see that one hot encoding has the worst performance. This makes sense because trees do not perform well on datasets with large feature spaces.
The remaining encodings returned similar performances. This also makes sense: trees are non-linear models, so target-guided encodings do not necessarily improve their performance.
### Logistic Regression Performance
```
def run_logistic(X_train, X_test, y_train, y_test):
# function to train and test the performance of logistic regression
logit = LogisticRegression(random_state=44, C=0.01, max_iter=100)
logit.fit(X_train, y_train)
print('Train set')
pred = logit.predict_proba(X_train)
print(
'Logistic Regression roc-auc: {}'.format(roc_auc_score(y_train, pred[:, 1])))
print('Test set')
pred = logit.predict_proba(X_test)
print(
'Logistic Regression roc-auc: {}'.format(roc_auc_score(y_test, pred[:, 1])))
# OHE
run_logistic(X_train_OHE, X_test_OHE, y_train, y_test)
# counts
run_logistic(X_train_count, X_test_count, y_train, y_test)
# ordered labels
run_logistic(X_train_ordered, X_test_ordered, y_train, y_test)
# mean encoding
run_logistic(X_train_mean, X_test_mean, y_train, y_test)
# ratio
run_logistic(X_train_ratio, X_test_ratio, y_train, y_test)
```
For logistic regression, the best performances are obtained with one hot encoding, which preserves the linear relationship between variables and target, and also with weight of evidence and ordered encoding.
Note, however, that count encoding returns the worst performance, as it does not create a monotonic relationship between variables and target, and in this case mean target encoding is probably causing over-fitting.
```
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from scratch.linear_algebra import Vector, dot
def sum_of_squares(v: Vector) -> float:
"""Computes the sum of squared elements in v"""
return dot(v, v)
from typing import Callable
def difference_quotient(f: Callable[[float], float],
x: float,
h: float) -> float:
return (f(x + h) - f(x)) / h
def square(x: float) -> float:
return x * x
def derivative(x: float) -> float:
return 2 * x
def estimate_gradient(f: Callable[[Vector], float],
v: Vector,
h: float = 0.0001):
return [partial_difference_quotient(f, v, i, h)
for i in range(len(v))]
import random
from scratch.linear_algebra import distance, add, scalar_multiply
def gradient_step(v: Vector, gradient: Vector, step_size: float) -> Vector:
"""Moves `step_size` in the `gradient` direction from `v`"""
assert len(v) == len(gradient)
step = scalar_multiply(step_size, gradient)
return add(v, step)
def sum_of_squares_gradient(v: Vector) -> Vector:
return [2 * v_i for v_i in v]
# x ranges from -50 to 49, y is always 20 * x + 5
inputs = [(x, 20 * x + 5) for x in range(-50, 50)]
def linear_gradient(x: float, y: float, theta: Vector) -> Vector:
slope, intercept = theta
predicted = slope * x + intercept # The prediction of the model.
error = (predicted - y) # error is (predicted - actual)
squared_error = error ** 2 # We'll minimize squared error
grad = [2 * error * x, 2 * error] # using its gradient.
return grad
from typing import TypeVar, List, Iterator
T = TypeVar('T') # this allows us to type "generic" functions
def minibatches(dataset: List[T],
batch_size: int,
shuffle: bool = True) -> Iterator[List[T]]:
"""Generates `batch_size`-sized minibatches from the dataset"""
# Start indexes 0, batch_size, 2 * batch_size, ...
batch_starts = [start for start in range(0, len(dataset), batch_size)]
if shuffle: random.shuffle(batch_starts) # shuffle the batches
for start in batch_starts:
end = start + batch_size
yield dataset[start:end]
def main():
xs = range(-10, 11)
actuals = [derivative(x) for x in xs]
estimates = [difference_quotient(square, x, h=0.001) for x in xs]
# plot to show they're basically the same
import matplotlib.pyplot as plt
plt.title("Actual Derivatives vs. Estimates")
plt.plot(xs, actuals, 'rx', label='Actual') # red x
plt.plot(xs, estimates, 'b+', label='Estimate') # blue +
plt.legend(loc=9)
# plt.show()
plt.close()
def partial_difference_quotient(f: Callable[[Vector], float],
v: Vector,
i: int,
h: float) -> float:
"""Returns the i-th partial difference quotient of f at v"""
w = [v_j + (h if j == i else 0) # add h to just the ith element of v
for j, v_j in enumerate(v)]
return (f(w) - f(v)) / h
# "Using the Gradient" example
# pick a random starting point
v = [random.uniform(-10, 10) for i in range(3)]
for epoch in range(1000):
grad = sum_of_squares_gradient(v) # compute the gradient at v
v = gradient_step(v, grad, -0.01) # take a negative gradient step
print(epoch, v)
assert distance(v, [0, 0, 0]) < 0.001 # v should be close to 0
# First "Using Gradient Descent to Fit Models" example
from scratch.linear_algebra import vector_mean
# Start with random values for slope and intercept.
theta = [random.uniform(-1, 1), random.uniform(-1, 1)]
learning_rate = 0.001
for epoch in range(5000):
# Compute the mean of the gradients
grad = vector_mean([linear_gradient(x, y, theta) for x, y in inputs])
# Take a step in that direction
theta = gradient_step(theta, grad, -learning_rate)
print(epoch, theta)
slope, intercept = theta
assert 19.9 < slope < 20.1, "slope should be about 20"
assert 4.9 < intercept < 5.1, "intercept should be about 5"
# Minibatch gradient descent example
theta = [random.uniform(-1, 1), random.uniform(-1, 1)]
for epoch in range(1000):
for batch in minibatches(inputs, batch_size=20):
grad = vector_mean([linear_gradient(x, y, theta) for x, y in batch])
theta = gradient_step(theta, grad, -learning_rate)
print(epoch, theta)
slope, intercept = theta
assert 19.9 < slope < 20.1, "slope should be about 20"
assert 4.9 < intercept < 5.1, "intercept should be about 5"
# Stochastic gradient descent example
theta = [random.uniform(-1, 1), random.uniform(-1, 1)]
for epoch in range(100):
for x, y in inputs:
grad = linear_gradient(x, y, theta)
theta = gradient_step(theta, grad, -learning_rate)
print(epoch, theta)
slope, intercept = theta
assert 19.9 < slope < 20.1, "slope should be about 20"
assert 4.9 < intercept < 5.1, "intercept should be about 5"
if __name__ == "__main__": main()
```
# Introduction to Deep Learning with PyTorch
In this notebook, you'll get introduced to [PyTorch](http://pytorch.org/), a framework for building and training neural networks. PyTorch in a lot of ways behaves like the arrays you love from Numpy. These Numpy arrays, after all, are just tensors. PyTorch takes these tensors and makes it simple to move them to GPUs for the faster processing needed when training neural networks. It also provides a module that automatically calculates gradients (for backpropagation!) and another module specifically for building neural networks. All together, PyTorch ends up being more coherent with Python and the Numpy/Scipy stack compared to TensorFlow and other frameworks.
## Neural Networks
Deep Learning is based on artificial neural networks which have been around in some form since the late 1950s. The networks are built from individual parts approximating neurons, typically called units or simply "neurons." Each unit has some number of weighted inputs. These weighted inputs are summed together (a linear combination) then passed through an activation function to get the unit's output.
<img src="assets/simple_neuron.png" width=400px>
Mathematically this looks like:
$$
\begin{align}
y &= f(w_1 x_1 + w_2 x_2 + b) \\
y &= f\left(\sum_i w_i x_i +b \right)
\end{align}
$$
With vectors this is the dot/inner product of two vectors:
$$
h = \begin{bmatrix}
x_1 \, x_2 \cdots x_n
\end{bmatrix}
\cdot
\begin{bmatrix}
w_1 \\
w_2 \\
\vdots \\
w_n
\end{bmatrix}
$$
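As a small numerical example, if $x = [1, 2]$ and $w = [0.5, -1]^\top$, the dot product is $1 \cdot 0.5 + 2 \cdot (-1) = -1.5$; adding a bias $b$ and applying the activation gives the unit's output $y = f(-1.5 + b)$.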
## Tensors
It turns out neural network computations are just a bunch of linear algebra operations on *tensors*, a generalization of matrices. A vector is a 1-dimensional tensor, a matrix is a 2-dimensional tensor, an array with three indices is a 3-dimensional tensor (RGB color images for example). The fundamental data structure for neural networks are tensors and PyTorch (as well as pretty much every other deep learning framework) is built around tensors.
<img src="assets/tensor_examples.svg" width=600px>
With the basics covered, it's time to explore how we can use PyTorch to build a simple neural network.
```
# First, import PyTorch
import torch
def activation(x):
""" Sigmoid activation function
Arguments
---------
x: torch.Tensor
"""
return 1/(1+torch.exp(-x))
### Generate some data
torch.manual_seed(7) # Set the random seed so things are predictable
# Features are 5 random normal variables
features = torch.randn((1, 5))
# True weights for our data, random normal variables again
weights = torch.randn_like(features)
# and a true bias term
bias = torch.randn((1, 1))
```
Above I generated data we can use to get the output of our simple network. This is all just random for now, going forward we'll start using normal data. Going through each relevant line:
`features = torch.randn((1, 5))` creates a tensor with shape `(1, 5)`, one row and five columns, that contains values randomly distributed according to the normal distribution with a mean of zero and standard deviation of one.
`weights = torch.randn_like(features)` creates another tensor with the same shape as `features`, again containing values from a normal distribution.
Finally, `bias = torch.randn((1, 1))` creates a single value from a normal distribution.
PyTorch tensors can be added, multiplied, subtracted, etc, just like Numpy arrays. In general, you'll use PyTorch tensors pretty much the same way you'd use Numpy arrays. They come with some nice benefits though such as GPU acceleration which we'll get to later. For now, use the generated data to calculate the output of this simple single layer network.
> **Exercise**: Calculate the output of the network with input features `features`, weights `weights`, and bias `bias`. Similar to Numpy, PyTorch has a [`torch.sum()`](https://pytorch.org/docs/stable/torch.html#torch.sum) function, as well as a `.sum()` method on tensors, for taking sums. Use the function `activation` defined above as the activation function.
```
## Calculate the output of this network using the weights and bias tensors
print( activation(torch.sum(features*weights) + bias))
```
You can do the multiplication and sum in the same operation using a matrix multiplication. In general, you'll want to use matrix multiplications since they are more efficient and accelerated using modern libraries and high-performance computing on GPUs.
Here, we want to do a matrix multiplication of the features and the weights. For this we can use [`torch.mm()`](https://pytorch.org/docs/stable/torch.html#torch.mm) or [`torch.matmul()`](https://pytorch.org/docs/stable/torch.html#torch.matmul) which is somewhat more complicated and supports broadcasting. If we try to do it with `features` and `weights` as they are, we'll get an error
```python
>> torch.mm(features, weights)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-13-15d592eb5279> in <module>()
----> 1 torch.mm(features, weights)
RuntimeError: size mismatch, m1: [1 x 5], m2: [1 x 5] at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/generic/THTensorMath.c:2033
```
As you're building neural networks in any framework, you'll see this often. Really often. What's happening here is our tensors aren't the correct shapes to perform a matrix multiplication. Remember that for matrix multiplications, the number of columns in the first tensor must equal the number of rows in the second tensor. Both `features` and `weights` have the same shape, `(1, 5)`. This means we need to change the shape of `weights` to get the matrix multiplication to work.
**Note:** To see the shape of a tensor called `tensor`, use `tensor.shape`. If you're building neural networks, you'll be using this method often.
There are a few options here: [`weights.reshape()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.reshape), [`weights.resize_()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.resize_), and [`weights.view()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view).
* `weights.reshape(a, b)` will return a new tensor with the same data as `weights` and size `(a, b)`; sometimes it shares the underlying data with `weights`, and sometimes it returns a clone, copying the data to another part of memory.
* `weights.resize_(a, b)` returns the same tensor with a different shape. However, if the new shape results in fewer elements than the original tensor, some elements will be removed from the tensor (but not from memory). If the new shape results in more elements than the original tensor, new elements will be uninitialized in memory. Here I should note that the underscore at the end of the method denotes that this method is performed **in-place**. Here is a great forum thread to [read more about in-place operations](https://discuss.pytorch.org/t/what-is-in-place-operation/16244) in PyTorch.
* `weights.view(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)`.
I usually use `.view()`, but any of the three methods will work for this. So, now we can reshape `weights` to have five rows and one column with something like `weights.view(5, 1)`.
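As a quick check (using the `(1, 5)` `weights` tensor from above), all three options produce the `(5, 1)` shape we need; since `resize_()` works in place, it is applied to a clone here so `weights` itself is left untouched:
```
print(weights.shape)                         # torch.Size([1, 5])
print(weights.reshape(5, 1).shape)           # torch.Size([5, 1])
print(weights.view(5, 1).shape)              # torch.Size([5, 1])
print(weights.clone().resize_(5, 1).shape)   # torch.Size([5, 1]), done in place on the clone
```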
> **Exercise**: Calculate the output of our little network using matrix multiplication.
```
## Calculate the output of this network using matrix multiplication
activation(torch.mm(features, weights.view(5,1)) + bias)
```
### Stack them up!
That's how you can calculate the output for a single neuron. The real power of this algorithm happens when you start stacking these individual units into layers and stacks of layers, into a network of neurons. The output of one layer of neurons becomes the input for the next layer. With multiple input units and output units, we now need to express the weights as a matrix.
<img src='assets/multilayer_diagram_weights.png' width=450px>
The first layer shown on the bottom here are the inputs, understandably called the **input layer**. The middle layer is called the **hidden layer**, and the final layer (on the right) is the **output layer**. We can express this network mathematically with matrices again and use matrix multiplication to get linear combinations for each unit in one operation. For example, the hidden layer ($h_1$ and $h_2$ here) can be calculated
$$
\vec{h} = [h_1 \, h_2] =
\begin{bmatrix}
x_1 \, x_2 \cdots \, x_n
\end{bmatrix}
\cdot
\begin{bmatrix}
w_{11} & w_{12} \\
w_{21} &w_{22} \\
\vdots &\vdots \\
w_{n1} &w_{n2}
\end{bmatrix}
$$
The output for this small network is found by treating the hidden layer as inputs for the output unit. The network output is expressed simply
$$
y = f_2 \! \left(\, f_1 \! \left(\vec{x} \, \mathbf{W_1}\right) \mathbf{W_2} \right)
$$
```
### Generate some data
torch.manual_seed(7) # Set the random seed so things are predictable
# Features are 3 random normal variables
features = torch.randn((1, 3))
# Define the size of each layer in our network
n_input = features.shape[1] # Number of input units, must match number of input features
n_hidden = 2 # Number of hidden units
n_output = 1 # Number of output units
# Weights for inputs to hidden layer
W1 = torch.randn(n_input, n_hidden)
# Weights for hidden layer to output layer
W2 = torch.randn(n_hidden, n_output)
# and bias terms for hidden and output layers
B1 = torch.randn((1, n_hidden))
B2 = torch.randn((1, n_output))
```
> **Exercise:** Calculate the output for this multi-layer network using the weights `W1` & `W2`, and the biases, `B1` & `B2`.
```
## Your solution here
hiddenLayer = activation(torch.mm(features, W1) + B1)
outputLayer = activation(torch.mm(hiddenLayer, W2) + B2)
print(outputLayer)
```
If you did this correctly, you should see the output `tensor([[ 0.3171]])`.
The number of hidden units is a parameter of the network, often called a **hyperparameter** to differentiate it from the weights and biases. As you'll see later when we discuss training a neural network, the more hidden units and layers a network has, the more capacity it has to learn from data and make accurate predictions.
## Numpy to Torch and back
Special bonus section! PyTorch has a great feature for converting between Numpy arrays and Torch tensors. To create a tensor from a Numpy array, use `torch.from_numpy()`. To convert a tensor to a Numpy array, use the `.numpy()` method.
```
import numpy as np
a = np.random.rand(4,3)
a
b = torch.from_numpy(a)
b
b.numpy()
```
The memory is shared between the Numpy array and Torch tensor, so if you change the values in-place of one object, the other will change as well.
```
# Multiply PyTorch Tensor by 2, in place
b.mul_(2)
# Numpy array matches new values from Tensor
a
```
# Predicting User Satisfaction with Amazon Alexa
## Logistic Regression + TF-IDF vs. BERT encoding for the Star Rating Prediction
### By Elena Korshakova and Diedre Brown
This notebook compares the performance of the TF-IDF encoding method with the BERT encoding method, each combined with a generalized linear model (logistic regression). We compare the two encoding methods based on accuracy, with the weighted F1 score as an additional metric because the ratings are unbalanced (skewed towards the 5-star score).
## Import Libraries
```
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import GridSearchCV
from bert_embedding import BertEmbedding
import warnings
warnings.filterwarnings('ignore')
```
## Load preprocessed data
```
train = pd.read_pickle("data/df_train.pickle")
test = pd.read_pickle("data/df_test.pickle")
train
```
# 1. Logistic Regression + TF-IDF features
## Transform reviews into features (TF-IDF encoding)
We start by transforming the reviews into features using TF-IDF, a term-weighting method intended to reflect how important a word is in a review. The TF-IDF value increases proportionally with the number of times a word appears in the review and is offset by the number of reviews in the corpus that contain the word, which adjusts for the fact that some words appear more frequently in general.
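As a minimal illustration of this weighting (on a made-up three-review toy corpus, not the Alexa data), words shared by many reviews receive lower weights than words that are specific to a single review:
```
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd

toy_reviews = ["love this speaker", "love love alexa", "terrible speaker"]
toy_vectoriser = TfidfVectorizer()
toy_X = toy_vectoriser.fit_transform(toy_reviews)
# Order the vocabulary by column index so the table lines up with the matrix
terms = sorted(toy_vectoriser.vocabulary_, key=toy_vectoriser.vocabulary_.get)
print(pd.DataFrame(toy_X.toarray(), columns=terms).round(2))
```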
```
vectoriser = TfidfVectorizer()
# Transform training data
X = vectoriser.fit_transform(train['review'])
y = train['rating']
X.shape
# Transform test data
X_test = vectoriser.transform(test['review'])
y_test = test['rating']
X_test.shape
```
## Hyperparameter tuning (Logistic Regression)
To get the best parameters for our data we start with hyperparameter tuning, using GridSearchCV to evaluate all possible combinations of parameter values and retain the best combination.
```
# Make validation split
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size = 0.15, random_state = 100)
# Create a parameter grid
param_grid = {
'penalty': ['l1', 'l2', 'elasticnet'],
'C': np.arange(0.1, 5, 0.2),
'solver': ['lbfgs', 'liblinear'],
'class_weight': ['balanced']
}
# Create grid search object
clf = GridSearchCV(LogisticRegression(max_iter = 500), param_grid = param_grid, cv = 5, n_jobs=-1)
# Fit on data
best_clf = clf.fit(X_train, y_train)
best_clf.best_params_
model = best_clf.best_estimator_
preds_val = model.predict(X_val)
preds_train = model.predict(X_train)
print("Training accuracy score: ", np.round(accuracy_score(y_train, preds_train), 4))
print("Validation accuracy score: ", np.round(accuracy_score(y_val, preds_val), 4))
print("Training F1 score: ", np.round(f1_score(y_train, preds_train, average='weighted'), 4))
print("Validation F1 score: ", np.round(f1_score(y_val, preds_val, average='weighted'), 4))
```
As a result we got 77% training accuracy (F1 = 76%) and 71% validation accuracy (F1 = 70%).
## Refit the best model and predict on test dataset
Based on the hyperparameter tuning results, we refit the model on the full training dataset and predict on the test dataset.
```
model = best_clf.best_estimator_
# Refit the model on the full training set
model.fit(X, y)
preds_test = model.predict(X_test)
print("Accuracy score on the test set: ", np.round(accuracy_score(y_test, preds_test), 4))
print("F1 score on the test set: ", np.round(f1_score(y_test, preds_test, average='weighted'), 4))
```
We got 68% accuracy (F1 = 68%) using TF-IDF encoding and the logistic regression model.
# 2. Logistic Regression + BERT embeddings
The next step aims to increase the accuracy using the BERT encoding method. BERT considers all the words of the input reviews simultaneously and then uses an attention mechanism to develop a contextual meaning of the words within each review.
## Transform reviews into features (embeddings)
```
train
bert_embedding = BertEmbedding()
def get_embedding(review):
    """Return the mean of the word embeddings for a review"""
    # split the review into lines before feeding it to bert_embedding
    row_embeddings = bert_embedding(review.split('\n'))[0][1:]
    avg_embedding = np.mean(row_embeddings, axis=1)[0].tolist()
    return avg_embedding
%%time
# Get embeddings for training set
X_emb = np.array(list(train['review'].apply(get_embedding)))
y_emb = train['rating']
X_emb.shape
# Save train features to pickle file
train_emb = pd.DataFrame(X_emb)
train_emb['y'] = y_emb
train_emb.to_pickle("data/df_train_emb.pickle")
%%time
# Get embeddings for test set
X_emb_test = np.array(list(test['review'].apply(get_embedding)))
y_emb_test = test['rating']
X_emb_test.shape
# Save test features to pickle file
test_emb = pd.DataFrame(X_emb_test)
test_emb['y'] = y_emb_test
test_emb.to_pickle("data/df_test_emb.pickle")
```
## Hyperparameter tuning (Logistic Regression)
We use exactly the same scheme for the logistic regression so we can compare encoding performance.
```
# Make validation split
X_train_emb, X_val_emb, y_train_emb, y_val_emb = train_test_split(X_emb, y_emb, test_size = 0.15, random_state = 100)
X_train_emb.shape
# Create a parameter grid
param_grid = {
'penalty': ['l1', 'l2', 'elasticnet'],
'C': np.arange(0.1, 5, 0.5),
'solver': ['saga'],
'class_weight': ['balanced']
}
# Create grid search object
clf = GridSearchCV(LogisticRegression(max_iter = 100), param_grid = param_grid, cv = 5, n_jobs=-1)
# Fit on data
best_clf = clf.fit(X_train_emb, y_train_emb)
best_clf.best_params_
model = best_clf.best_estimator_
preds_val = model.predict(X_val_emb)
preds_train = model.predict(X_train_emb)
print("Training accuracy score: ", np.round(accuracy_score(y_train_emb, preds_train), 4))
print("Validation accuracy score: ", np.round(accuracy_score(y_val_emb, preds_val), 4))
print("Training F1 score: ", np.round(f1_score(y_train_emb, preds_train, average='weighted'), 4))
print("Validation F1 score: ", np.round(f1_score(y_val_emb, preds_val, average='weighted'), 4))
```
## Refit the best model and predict on test dataset
```
model = best_clf.best_estimator_
# Refit the model on the full training set
model.fit(X_emb, y_emb)
preds_test = model.predict(X_emb_test)
print("Accuracy score on the test set: ", np.round(accuracy_score(y_emb_test, preds_test), 4))
print("F1 score on the test set: ", np.round(f1_score(y_emb_test, preds_test, average='weighted'), 4))
```
As a result, the BERT embeddings did not give a boost in performance: we got only 49% accuracy (F1 = 56%).
## Data types
### Numbers
```
1 + 1
1 * 3
1 / 2
2 ** 4
4 % 2
5 % 2
```
### Variable Assignment
```
# Can not start with number or special characters
name_of_var = 2
x = 2
y = 3
z = x + y
z
```
### Strings
```
'single quotes'
"double quotes"
" wrap lot's of other quotes"
```
### Printing
```
x = 'hello'
x
print(x)
num = 12
name = 'Sam'
print('My number is: {one}, and my name is: {two}'.format(one=num,two=name))
print('My number is: {}, and my name is: {}'.format(num,name))
```
### Lists
```
[1,2,3]
['hi',1,[1,2]]
my_list = ['a','b','c']
my_list.append('d')
my_list
my_list[0]
my_list[1]
my_list[1:]
my_list[:1]
my_list[0] = 'NEW'
my_list
nest = [1,2,3,[4,5,['target']]]
nest[3]
nest[3][2]
nest[3][2][0]
```
### Dictionaries
```
d = {'key1':'item1','key2':'item2'}
d
d['key1']
```
### Booleans
```
True
False
```
### Tuples
```
t = (1,2,3)
t[0]
t[0] = 'NEW'
```
### Sets
```
{1,2,3}
{1,2,3,1,2,1,2,3,3,3,3,2,2,2,1,1,2}
```
## Comparison Operators
```
1 > 2
1 < 2
1 >= 1
1 <= 4
1 == 1
'hi' == 'bye'
```
## Logic Operators
```
(1 > 2) and (2 < 3)
(1 > 2) or (2 < 3)
(1 == 2) or (2 == 3) or (4 == 4)
```
## if, elif, else Statements
```
if 1 < 2:
    print('Yep!')
if 1 < 2:
    print('yep!')
if 1 < 2:
    print('first')
else:
    print('last')
if 1 > 2:
    print('first')
else:
    print('last')
if 1 == 2:
    print('first')
elif 3 == 3:
    print('middle')
else:
    print('Last')
```
## for Loops
```
seq = [1,2,3,4,5]
for item in seq:
    print(item)
for item in seq:
    print('Yep')
for jelly in seq:
    print(jelly+jelly)
```
## while Loops
```
i = 1
while i < 5:
    print('i is: {}'.format(i))
    i = i+1
```
## range()
```
range(5)
for i in range(5):
    print(i)
list(range(5))
```
## list comprehension
```
x = [1,2,3,4]
out = []
for item in x:
    out.append(item**2)
print(out)
[item**2 for item in x]
```
## functions
```
def my_func(param1='default'):
    """
    Docstring goes here.
    """
    print(param1)
my_func
my_func()
my_func('new param')
my_func(param1='new param')
def square(x):
    return x**2
out = square(2)
print(out)
```
## lambda expressions
```
def times2(var):
    return var*2
times2(2)
lambda var: var*2
```
## map and filter
```
seq = [1,2,3,4,5]
map(times2,seq)
list(map(times2,seq))
list(map(lambda var: var*2,seq))
filter(lambda item: item%2 == 0,seq)
list(filter(lambda item: item%2 == 0,seq))
```
## methods
```
st = 'hello my name is Sam'
st.lower()
st.upper()
st.split()
tweet = 'Go Sports! #Sports'
tweet.split('#')
tweet.split('#')[1]
d
d.keys()
d.items()
lst = [1,2,3]
lst.pop()
lst
'x' in [1,2,3]
'x' in ['x','y','z']
```
# Great Job!
# 1.2
```
#Exercise 1
name = "Julie"
age = "42"
sentence ="Hi my name is Julie and I am 42 years old."
print(sentence.format(name,age))
#Exercise 2
year = 1830
if (year >= 2000) and (year <= 2100):
    print("Welcome to the 21st century")
else:
    print("You are before or after the 21st century")
#Exercise 3
"hello"*3
#Exercise 4
shoes = ["Spizikes", "Air Force 1", "Curry 2", "Melo 5"]
print(shoes)
#Exercise 5
numbers = [ 76, 83, 16, 69, 52, 78, 10, 77, 45, 52,
32, 17, 58, 54, 79, 72, 55, 50, 81, 74,
45, 33, 38, 10, 40, 44, 70, 81, 79, 28,
83, 41, 14, 16, 27, 38, 20, 84, 24, 50,
59, 71, 1, 13, 56, 91, 29, 54, 65, 23,
60, 57, 13, 39, 58, 94, 94, 42, 46, 58,
59, 29, 69, 60, 83, 9, 83, 5, 64, 70,
55, 89, 67, 89, 70, 8, 90, 17, 48, 17,
94, 18, 98, 72, 96, 26, 13, 7, 58, 67,
38, 48, 43, 98, 65, 8, 74, 44, 92 ]
for number in numbers:
    if number > 90:
        print(number)
#Exercise 6
words = {"PoGo":"Slang for Pokemon Go" ,"Spange": "To collect spare change, either from couches, passerbys on the street or any numerous other ways and means","Lie-Fi": "When your phone or tablet indicates that you are connected to a wireless network, however you are still unable to load webpages or use any internet services with your device"}
words["PoGo"]
#words["Spange"]
#words["Lie-Fi"]
year = 2010
make = 'made in japan'
model = 'model 2010'
#Exercise 7
class Car:
    def __init__(self, year, make, model):
        self.year = year
        self.make = make
        self.model = model
    def age(self):
        return 2019 - self.year
car_object = Car(2014, 'Made in Italy', 'Toyota')
car_object.age()
```
# 1.2.2
```
#Create a program that lists all of the unique words in a song and displays the count of each unique word.
import re
user_str = """
Work, work, work, work, work, work
He said me haffi
Work, work, work, work, work, work
He see me do mi
Dirt, dirt, dirt, dirt, dirt, dirt
So me put in
Work, work, work, work, work, work
When you ah gon'
Learn, learn, learn, learn, learn
Me nuh care if him
Hurt, hurt, hurt, hurt, hurting
Dry, me a desert him
Nuh time to have you lurking
Him ah go act like he nuh like it
You know I dealt with you the nicest
Nuh body touch me you nuh righteous
Nuh badda, text me in a crisis
I believed all of your dreams, adoration
You took my heart and my keys and my patience
You took my heart on my sleeve for decoration
You mistaken my love I brought for you for foundation
All that I wanted from you was to give me
Something that I never had
Something that you've never seen
Something
"""
# Split words to list
x = user_str.lower().split()
# Clean words by removing non-alphanumeric
clean_x = [re.sub(r'\W+', '', word) for word in x]
x = {}
for y in clean_x:
    x[y] = x.get(y, 0) + 1
print(x)
```
# 1.2.3
```
# What is 7 to the power of 4?
print (7**4)
#Split the string
string = "Hi there Sam!"
print(string.split())
#Given the variables
planet = "Earth"
diameter = 12742
f"The diameter of {planet} is {diameter} kilometers."
#Given this nested list, use indexing to grab the word "hello"
lst = [1,2,[3,4],[5,[100,200,['hello']],23,11],1,7]
lst[3][1][2][0]
#Given this nested dictionary grab the word "hello". Be prepared, this will be annoying/tricky
d = {'k1':[1,2,3,{'tricky':['oh','man','inception',{'target':[1,2,3,'hello']}]}]}
d.keys()
d["k1"][3]['tricky'][3]["target"][3]
#What is the main difference between a tuple and a list?
#tuple is immutable
t = (1, 2, 3)
example_list = [1, 2, 3, 4, 5]  # avoid the name "list", which would shadow the built-in used later
#Create a function that grabs the email website domain from a string in the form:
'user@domain.com'.split('@')[1]
#Create a basic function that returns True if the word 'dog' is contained in the input string. Don't worry about edge cases like a punctuation being attached to the word dog, but do account for capitalization.
def findDog(st):
    if 'dog' in st.lower():
        return True
    else:
        return False
st = input("Please key a string: >")
print(findDog(st))
#Create a function that counts the number of times the word "dog" occurs in a string. Again ignore edge cases.
string = input("Please enter your string: ")
def countdogs(string):
    count = 0
    for word in string.lower().split():
        if word == 'dog' or word == 'dogs':
            count = count + 1
    print(count)
countdogs(string)
# Use lambda expressions and the filter() function to filter out words from a list that don't start with the letter 's'. For example:
seq = ['soup','dog','salad','cat','great']
list(filter(lambda word: word[0]=='s',seq))
#final problem
print("Please enter the speed(km/h)(only number please): \n")
speed = int(input("> "))
print("Please enter your birthday: (in DD/MM/YYYY format)\n")
birthday = str(input("> "))
def speeding(speed, birthday):
    if birthday == '29/08/1989':
        s = speed - 5
    else:
        s = speed
    if s <= 60:
        print("You pass.")
    elif s <= 80:
        print("You get a small ticket")
    else:
        print("You get a big ticket.")
speeding(speed, birthday)
```
# IHME PROJECTIONS
IHME's COVID-19 projections were developed in response to requests from the University of Washington School of Medicine and other US hospital systems and state governments working to determine when COVID-19 would overwhelm their ability to care for patients. The forecasts show demand for hospital services, including the availability of ventilators, general hospital beds, and ICU beds, as well as daily and cumulative deaths due to COVID-19.
```
import pandas as pd
import requests
import pycountry
import zipfile
from functools import reduce
import io
import re
from datetime import datetime
from csv import QUOTE_NONNUMERIC
# papermill parameters
output_folder = "../output/"
# get .zip file content
response = requests.get("https://ihmecovid19storage.blob.core.windows.net/latest/ihme-covid19.zip", stream=True)
assert response.status_code == 200
# parse .zip
z = zipfile.ZipFile(io.BytesIO(response.content))
# create df from csv in zip
df = pd.read_csv(z.open(re.search(r".*\.csv", "\n".join(z.namelist()))[0]))
# set columns Last_Update_Date and LAST_REPORTED_FLAG
df['Last_Update_Date'] = datetime.now()
df['Last_Reported_Flag'] = df['date'].max() == df['date']
regions_manual_dict = {
"Balearic Islands": ("ES", "Spain", "ES-IB"),
"Basque Country": ("ES", "Spain", "ES-PV"),
"Bavaria": ("DE", "Germany", "DE-BY"),
"Canary Islands": ("ES", "Spain", "ES-CN"),
"Castile and Leon": ("ES", "Spain", "ES-CL"),
"Catalonia": ("ES", "Spain", "ES-CT"),
"Community of Madrid": ("ES", "Spain", "ES-MD"),
"King and Snohomish Counties (excluding Life Care Center), WA": ("US", "United States", "US-WA"),
"Life Care Center, Kirkland, WA": ("US", "United States", "US-WA"),
"Lower Saxony": ("DE", "Germany", "DE-NI"),
"Navarre": ("ES", "Spain", "ES-NA"),
"North Rhine-Westphalia": ("DE", "Germany", "DE-NW"),
"Other Counties, WA": ("US", "United States", "US-WA"),
"Provincia autonoma di Bolzano": ("IT", "Italy", "IT-BZ"),
"Provincia autonoma di Trento": ("IT", "Italy", "IT-TN"),
"Rhineland-Palatinate": ("DE", "Germany", "DE-RP"),
"Saxony-Anhalt": ("DE", "Germany", "DE-ST"),
"Saxony": ("DE", "Germany", "DE-SN"),
"Thuringia": ("DE", "Germany", "DE-TH"),
"Valencian Community": ("ES", "Spain", "ES-VC")
}
subdivisions = {}
# create country resolver helper func
def resolve_country(location_name):
    country_code, country_name, subdiv_code = None, None, None
    lookup = pycountry.countries.get(name=location_name)
    if not lookup:
        try:
            lookup = pycountry.countries.search_fuzzy(location_name)[0]
            country_name, country_code = lookup.name, lookup.alpha_2
            if country_code not in list(subdivisions):
                subdivisions[country_code] = {k.name: k.code.replace(f"{country_code}-", "") for k in pycountry.subdivisions.get(country_code=country_code)}
            subdiv_code = subdivisions[country_code][location_name]
        except (LookupError):
            if location_name in list(regions_manual_dict):
                country_code, country_name, subdiv_code = regions_manual_dict[location_name]
                subdiv_code = subdiv_code.replace(f"{country_code}-", "")
    else:
        country_name, country_code = lookup.name, lookup.alpha_2
    return country_name, country_code, subdiv_code
df['COUNTRY_REGION'] = None
df['ISO3166_1'] = None
df['ISO3166_2'] = None
# get distinct locations list
distinct_locations = list(df['location_name'].unique())
# iterate distinct_locations
for c in distinct_locations:
    country_name, country_code, subdiv_code = resolve_country(c)
    # set value where location_name == c
    df['COUNTRY_REGION'].loc[df['location_name'] == c] = country_name
    df['ISO3166_1'].loc[df['location_name'] == c] = country_code
    df['ISO3166_2'].loc[df['location_name'] == c] = subdiv_code
# fix some subdivisions manually
df['ISO3166_2'].loc[(df['ISO3166_2'].isna()) & (df['location_name'] == 'Aragon')] = "AR"
df['ISO3166_2'].loc[(df['ISO3166_2'].isna()) & (df['location_name'] == 'Andalucia')] = "AN"
df['ISO3166_2'].loc[(df['ISO3166_2'].isna()) & (df['location_name'] == 'Baden-Wurttemberg')] = "BW"
df['ISO3166_2'].loc[(df['ISO3166_2'].isna()) & (df['location_name'] == 'Hesse')] = "HE"
df['date'] = pd.to_datetime(df['date'].astype(str), format='%Y-%m-%d')
# set province_state where subdivisions code exists
df['PROVINCE_STATE'] = None
df['PROVINCE_STATE'].loc[df['ISO3166_2'].notna()] = df.loc[df['ISO3166_2'].notna()]['location_name']
# drop cols
cols = list(df.columns) # dataset has uncertain columns, make sure not referencing any non-existing columns
drop_cols = list(filter(lambda col: col in cols, ['location_name', 'V1', 'location_id']))
df = df.drop(columns=drop_cols)
df.dtypes
df.sample(5)
df.to_csv(output_folder + "IHME_COVID_19.csv", columns=["date","allbed_mean","allbed_lower","allbed_upper","ICUbed_mean","ICUbed_lower","ICUbed_upper","InvVen_mean","InvVen_lower","InvVen_upper","deaths_mean","deaths_lower","deaths_upper","admis_mean","admis_lower","admis_upper","newICU_mean","newICU_lower","newICU_upper","totdea_mean","totdea_lower","totdea_upper","bedover_mean","bedover_lower","bedover_upper","icuover_mean","icuover_lower","icuover_upper","Last_Update_Date","Last_Reported_Flag","COUNTRY_REGION","ISO3166_1","ISO3166_2","PROVINCE_STATE"],
          index=False, sep=",", quoting=QUOTE_NONNUMERIC)
```
# The Hunt for the Higgs - an Example from Research
In this exercise we will look at how the Higgs boson was searched for, and at how scientific research often works.
The data used here are genuine, significant measurement data from the Nobel Prize-winning experiments that proved the existence of the [Higgs boson](https://fi.wikipedia.org/wiki/Higgsin_bosoni). These data are now also available to you, so you can track down the particle yourself.
The approach described in this document is general and is used in many branches of science. When we have some kind of theoretical model for how we expect the results to look, we can make measurements and compare our results with the model. Perhaps the results confirm our model; perhaps new questions arise and we notice that our theory must change to explain the results. We go through that process round after round as we drill deeper into our knowledge of the world. (See ["the scientific method"](https://en.wikipedia.org/wiki/Scientific_method))
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# We will talk about these data files shortly.
csvs = [pd.read_csv('http://opendata.cern.ch/record/5200/files/4mu_2011.csv'), pd.read_csv('http://opendata.cern.ch/record/5200/files/4e_2011.csv'), pd.read_csv('http://opendata.cern.ch/record/5200/files/2e2mu_2011.csv')]
csvs += [pd.read_csv('http://opendata.cern.ch/record/5200/files/4mu_2012.csv'), pd.read_csv('http://opendata.cern.ch/record/5200/files/4e_2012.csv'), pd.read_csv('http://opendata.cern.ch/record/5200/files/2e2mu_2012.csv')]
fourlep = pd.concat(csvs)
```
According to the assumptions of the Standard Model, the Higgs particle should decay in such a way that two Z bosons are produced. The Z bosons in turn decay into four leptons (the lepton group includes electrons and muons). This is not the only process that produces leptons, so we need to filter out background noise in order to observe these events. The model has not been able to pin down the mass of the Higgs particle, but well-considered guesses get you quite far. The decay into four leptons was seen to dominate at certain masses, and that is where we can begin our search.
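The quantity examined below is the invariant mass of the four-lepton system (the `M` column in the data). For leptons with energies $E_i$ and momentum vectors $\vec{p}_i$ (in natural units, $c = 1$) it is

$$
m_{4\ell} = \sqrt{\left(\sum_{i=1}^{4} E_i\right)^{2} - \left\lVert \sum_{i=1}^{4} \vec{p}_i \right\rVert^{2}},
$$

so a particle that decays into the four leptons shows up as a peak at its own mass in this distribution.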
```
# We can set limits on the region we want to examine.
rmin = 70
rmax = 181
nbins = 37
M_hist = np.histogram(fourlep['M'], bins = nbins, range = (rmin,rmax))
hist, bins = M_hist
width = 1.0*(bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
```
We use a simulation to support our research. If we have a model for the processes in which collisions occur, we can compare the expected results with our measurements. Below we create a model using the [Monte Carlo](https://sv.wikipedia.org/wiki/Monte_Carlo-metod) method. The values are weighted according to luminosity, cross section and number of observations. In practice, the values are randomly generated according to a theoretical model that simulates real experimental results. The simulated points thus follow the same distribution that similar experiments have been shown to follow.
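As a toy illustration of the Monte Carlo idea (this is not the actual CMS simulation, which involves full detector modelling), one can draw pseudo-events from an assumed model, for example a flat background plus a narrow peak at 125 GeV, and histogram them in exactly the same way as the measured data:
```
# Toy Monte Carlo: sample pseudo-events from an assumed model and bin them
rng = np.random.default_rng(42)
toy_background = rng.uniform(rmin, rmax, size=200)   # featureless background
toy_signal = rng.normal(loc=125, scale=2, size=20)   # narrow "signal" peak at 125 GeV
toy_events = np.concatenate([toy_background, toy_signal])
toy_hist, _ = np.histogram(toy_events, bins=nbins, range=(rmin, rmax))
print(toy_hist)
```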
```
dy = np.array([0,0,0,0,0,0.354797,0.177398,2.60481,0,0,0,0,0,0,0,0,0,0.177398,0.177398,0,0.177398,0,0,0,0,0,0,0,0,0,0,0,0.177398,0,0,0,0])
ttbar = np.array([0.00465086,0,0.00465086,0,0,0,0,0,0,0,0.00465086,0,0,0,0,0,0.00465086,0,0,0,0,0.00465086,0.00465086,0,0,0.0139526,0,0,0.00465086,0,0,0,0.00465086,0.00465086,0.0139526,0,0])
zz = np.array([0.181215,0.257161,0.44846,0.830071,1.80272,4.57354,13.9677,14.0178,4.10974,1.58934,0.989974,0.839775,0.887188,0.967021,1.07882,1.27942,1.36681,1.4333,1.45141,1.41572,1.51464,1.45026,1.47328,1.42899,1.38757,1.33561,1.3075,1.29831,1.31402,1.30672,1.36442,1.39256,1.43472,1.58321,1.85313,2.19304,2.95083])
hzz = np.array([0.00340992,0.00450225,0.00808944,0.0080008,0.00801578,0.0108945,0.00794274,0.00950757,0.0130648,0.0163568,0.0233832,0.0334813,0.0427229,0.0738129,0.13282,0.256384,0.648352,2.38742,4.87193,0.944299,0.155005,0.0374193,0.0138906,0.00630364,0.00419265,0.00358719,0.00122527,0.000885718,0.000590479,0.000885718,0.000797085,8.86337e-05,0.000501845,8.86337e-05,0.000546162,4.43168e-05,8.86337e-05])
```
Let's look at the statistics formed here and how they relate to the real results from the accelerator.
```
# ZZ, a pair of heavier bosons.
plt.figure(figsize = (15,3))
plt.bar(center, zz, align = 'center', width = width, color = 'b', linewidth = 0, edgecolor = 'black', alpha = 0.5)
plt.xlabel('4 leptoner - invariant massa (GeV)', fontsize = 15)
plt.ylabel('Händelser / 3 GeV\n', fontsize = 15)
plt.xlim(rmin,rmax)
plt.show()
# DY, events consisting of single Z bosons
plt.figure(figsize = (15,3))
plt.bar(center, dy, align = 'center', width = width, color = 'g', linewidth = 0, edgecolor = 'black', alpha = 0.5)
plt.xlabel('4 leptoner - invariant massa (GeV)', fontsize = 15)
plt.ylabel('Händelser / 3 GeV\n', fontsize = 15)
plt.xlim(rmin,rmax)
plt.show()
# ttbar, pairs of top and antitop quarks.
plt.figure(figsize = (15,3))
plt.bar(center, ttbar, align = 'center', width = width, color = 'gray', linewidth = 0, edgecolor = 'b', alpha = 0.5)
plt.xlabel('4 leptoner - invariant massa (GeV)', fontsize = 15)
plt.ylabel('Händelser / 3 GeV \n', fontsize = 15)
plt.xlim(rmin,rmax)
plt.show()
```
We combine these simulations of the background noise and see what we can expect the results to look like.
```
plt.figure(figsize = (15,5))
# ttbar
ttbar_bar = plt.bar(center, ttbar, align = 'center', width = width, color = 'gray', linewidth = 0, edgecolor = 'b',
alpha = 0.5, label = r'$t\bar{t}$')
# DY
dy_bar = plt.bar(center, dy, align = 'center', width = width, color = 'g', linewidth = 0, edgecolor = 'black',
alpha = 0.5, bottom = ttbar, label = 'Z/$\gamma^{*}$ + X')
# ZZ
zz_bar = plt.bar(center, zz, align = 'center', width = width, color = 'b', linewidth = 0, edgecolor = 'black',
alpha = 0.5, bottom = ttbar+dy, label = r'ZZ $\rightarrow$ 4l')
plt.title('$ \sqrt{s} = 7$ TeV, L = 2.3 $fb^{-1}$; $\sqrt{s} = 8$ TeV, L = 11.6 $fb^{-1}$ \n', fontsize = 12)
plt.xlabel('$m_{4 leptoner}$ (GeV)', fontsize = 15)
plt.ylabel('Händelser / 3 GeV\n', fontsize = 15)
plt.ylim(0,25)
plt.xlim(rmin,rmax)
plt.legend(fontsize = 15)
plt.show()
```
We see that something is happening around 90 GeV. This is the mass of the Z boson, so we understand that these are products of Z boson decays. In the following cell we add the measured data points to the plot. How well do the pictures correspond to each other?
```
plt.figure(figsize = (15,5))
xerrs = [width*0.5 for i in range(0, nbins)]
yerrs = np.sqrt(hist)
# ttbar
ttbar_bar = plt.bar(center, ttbar, align = 'center', width = width, color = 'gray', linewidth = 0, edgecolor = 'b',
alpha = 0.5, label = r'$t\bar{t}$')
# DY
dy_bar = plt.bar(center, dy, align = 'center', width = width, color = 'g', linewidth = 0, edgecolor = 'black',
alpha = 0.5, bottom = ttbar, label = 'Z/$\gamma^{*}$ + X')
# ZZ
zz_bar = plt.bar(center, zz, align = 'center', width = width, color = 'b', linewidth = 0, edgecolor = 'black',
alpha = 0.5, bottom = ttbar+dy, label = r'ZZ $\rightarrow$ 4l')
# Measurements.
data_bar = plt.errorbar(center, hist, xerr = xerrs, yerr = yerrs, linestyle = 'None', color = 'black',
marker = 'o', label = 'Data')
plt.title('$ \sqrt{s} = 7$ TeV, L = 2.3 $fb^{-1}$; $\sqrt{s} = 8$ TeV, L = 11.6 $fb^{-1}$ \n', fontsize = 12)
plt.xlabel('$m_{4l}$ (GeV)', fontsize = 15)
plt.ylabel('Händelser / 3 GeV\n', fontsize = 15)
plt.ylim(0,25)
plt.xlim(rmin,rmax)
plt.legend(fontsize = 15)
plt.show()
```
Our model of the background noise does not fully match the measurement results. For comparison, physicists calculated the decay energies from the Higgs boson for various estimates of the boson's mass. In the next graph we see what kind of energy distribution should be measurable if the mass of the Higgs boson were 125 GeV.
```
# HZZ, theoretical assumption about the Higgs boson's decay products, assuming a mass of 125 GeV
plt.figure(figsize = (15,3))
plt.bar(center, hzz, align = 'center', width = width, color = 'w', linewidth = 1, edgecolor = 'r')
plt.xlabel('4 leptoner - invariant massa (GeV)', fontsize = 15)
plt.ylabel('Händelser / 3 GeV\n', fontsize = 15)
plt.xlim(rmin,rmax)
plt.show()
```
Bonus question: How can a particle with a mass of 125 GeV decay into two Z bosons with masses of over 90 GeV?
We add the latest graph on top of the graph of the background noise.
```
plt.figure(figsize = (15,5))
# ttbar
ttbar_bar = plt.bar(center, ttbar, align = 'center', width = width, color = 'gray', linewidth = 0, edgecolor = 'b',
alpha = 0.5, label = r'$t\bar{t}$')
# DY
dy_bar = plt.bar(center, dy, align = 'center', width = width, color = 'g', linewidth = 0, edgecolor = 'black',
alpha = 0.5, bottom = ttbar, label = 'Z/$\gamma^{*}$ + X')
# ZZ
zz_bar = plt.bar(center, zz, align = 'center', width = width, color = 'b', linewidth = 0, edgecolor = 'black',
alpha = 0.5, bottom = ttbar+dy, label = r'ZZ $\rightarrow$ 4l')
# HZZ
hzz_bar = plt.bar(center, hzz, align = 'center', width = width, color = 'w', linewidth = 1, edgecolor = 'r',
bottom = ttbar+dy+zz, label = '$m_{H}$ = 125 GeV')
# Measurements.
data_bar = plt.errorbar(center, hist, xerr = xerrs, yerr = yerrs, linestyle = 'None', color = 'black',
marker = 'o', label = 'Data')
plt.title('$ \sqrt{s} = 7$ TeV, L = 2.3 $fb^{-1}$; $\sqrt{s} = 8$ TeV, L = 11.6 $fb^{-1}$ \n', fontsize = 12)
plt.xlabel('$m_{4 leptoner}$ (GeV)', fontsize = 15)
plt.ylabel('Händelser / 3 GeV\n', fontsize = 15)
plt.ylim(0,25)
plt.xlim(rmin,rmax)
plt.legend(fontsize = 15)
plt.show()
```
The number of data points used here is quite small, but it gives a genuine glimpse into research work. Only a very small fraction of the decays result in four leptons (the criterion for these measurements), so this statistic consists of data points collected during the years 2011-2012. More information about the measurements can be found [here](http://opendata.cern.ch/record/5500).
```
# If you want to examine the data more closely, information from all the four-particle observations is available
pd.options.display.max_columns = 50
fourlep.head()
```
It is thus clearly noticeable that the peak at 125 GeV is something beyond the results we predicted. Our results point in the same direction as the CMS analyses - the main difference is the precision of the measurements.
<img src = 'https://inspirehep.net/files/6d3aa0c4fbefece34158f7f0c6e2e818'>
|
github_jupyter
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Vi talar snart om dessa datafiler.
csvs = [pd.read_csv('http://opendata.cern.ch/record/5200/files/4mu_2011.csv'), pd.read_csv('http://opendata.cern.ch/record/5200/files/4e_2011.csv'), pd.read_csv('http://opendata.cern.ch/record/5200/files/2e2mu_2011.csv')]
csvs += [pd.read_csv('http://opendata.cern.ch/record/5200/files/4mu_2012.csv'), pd.read_csv('http://opendata.cern.ch/record/5200/files/4e_2012.csv'), pd.read_csv('http://opendata.cern.ch/record/5200/files/2e2mu_2012.csv')]
fourlep = pd.concat(csvs)
# Vi kan sätta in gränser för det område vi vill undersöka.
rmin = 70
rmax = 181
nbins = 37
M_hist = np.histogram(fourlep['M'], bins = nbins, range = (rmin,rmax))
hist, bins = M_hist
width = 1.0*(bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
dy = np.array([0,0,0,0,0,0.354797,0.177398,2.60481,0,0,0,0,0,0,0,0,0,0.177398,0.177398,0,0.177398,0,0,0,0,0,0,0,0,0,0,0,0.177398,0,0,0,0])
ttbar = np.array([0.00465086,0,0.00465086,0,0,0,0,0,0,0,0.00465086,0,0,0,0,0,0.00465086,0,0,0,0,0.00465086,0.00465086,0,0,0.0139526,0,0,0.00465086,0,0,0,0.00465086,0.00465086,0.0139526,0,0])
zz = np.array([0.181215,0.257161,0.44846,0.830071,1.80272,4.57354,13.9677,14.0178,4.10974,1.58934,0.989974,0.839775,0.887188,0.967021,1.07882,1.27942,1.36681,1.4333,1.45141,1.41572,1.51464,1.45026,1.47328,1.42899,1.38757,1.33561,1.3075,1.29831,1.31402,1.30672,1.36442,1.39256,1.43472,1.58321,1.85313,2.19304,2.95083])
hzz = np.array([0.00340992,0.00450225,0.00808944,0.0080008,0.00801578,0.0108945,0.00794274,0.00950757,0.0130648,0.0163568,0.0233832,0.0334813,0.0427229,0.0738129,0.13282,0.256384,0.648352,2.38742,4.87193,0.944299,0.155005,0.0374193,0.0138906,0.00630364,0.00419265,0.00358719,0.00122527,0.000885718,0.000590479,0.000885718,0.000797085,8.86337e-05,0.000501845,8.86337e-05,0.000546162,4.43168e-05,8.86337e-05])
# ZZ, ett par tyngre bosoner.
plt.figure(figsize = (15,3))
plt.bar(center, zz, align = 'center', width = width, color = 'b', linewidth = 0, edgecolor = 'black', alpha = 0.5)
plt.xlabel('4 leptoner - invariant massa (GeV)', fontsize = 15)
plt.ylabel('Händelser / 3 GeV\n', fontsize = 15)
plt.xlim(rmin,rmax)
plt.show()
# DY, Händelser bestående av enskilda Z-bosoner
plt.figure(figsize = (15,3))
plt.bar(center, dy, align = 'center', width = width, color = 'g', linewidth = 0, edgecolor = 'black', alpha = 0.5)
plt.xlabel('4 leptoner - invariant massa (GeV)', fontsize = 15)
plt.ylabel('Händelser / 3 GeV\n', fontsize = 15)
plt.xlim(rmin,rmax)
plt.show()
# ttbar, par av top- och antitop-kvarkar.
plt.figure(figsize = (15,3))
plt.bar(center, ttbar, align = 'center', width = width, color = 'gray', linewidth = 0, edgecolor = 'b', alpha = 0.5)
plt.xlabel('4 leptoner - invariant massa (GeV)', fontsize = 15)
plt.ylabel('Händelser / 3 GeV \n', fontsize = 15)
plt.xlim(rmin,rmax)
plt.show()
plt.figure(figsize = (15,5))
# ttbar
ttbar_bar = plt.bar(center, ttbar, align = 'center', width = width, color = 'gray', linewidth = 0, edgecolor = 'b',
alpha = 0.5, label = r'$t\bar{t}$')
# DY
dy_bar = plt.bar(center, dy, align = 'center', width = width, color = 'g', linewidth = 0, edgecolor = 'black',
alpha = 0.5, bottom = ttbar, label = 'Z/$\gamma^{*}$ + X')
# ZZ
zz_bar = plt.bar(center, zz, align = 'center', width = width, color = 'b', linewidth = 0, edgecolor = 'black',
alpha = 0.5, bottom = ttbar+dy, label = r'ZZ $\rightarrow$ 4l')
plt.title('$ \sqrt{s} = 7$ TeV, L = 2.3 $fb^{-1}$; $\sqrt{s} = 8$ TeV, L = 11.6 $fb^{-1}$ \n', fontsize = 12)
plt.xlabel('$m_{4 leptons}$ (GeV)', fontsize = 15)
plt.ylabel('Events / 3 GeV\n', fontsize = 15)
plt.ylim(0,25)
plt.xlim(rmin,rmax)
plt.legend(fontsize = 15)
plt.show()
plt.figure(figsize = (15,5))
xerrs = [width*0.5 for i in range(0, nbins)]
yerrs = np.sqrt(hist)
# ttbar
ttbar_bar = plt.bar(center, ttbar, align = 'center', width = width, color = 'gray', linewidth = 0, edgecolor = 'b',
alpha = 0.5, label = r'$t\bar{t}$')
# DY
dy_bar = plt.bar(center, dy, align = 'center', width = width, color = 'g', linewidth = 0, edgecolor = 'black',
alpha = 0.5, bottom = ttbar, label = 'Z/$\gamma^{*}$ + X')
# ZZ
zz_bar = plt.bar(center, zz, align = 'center', width = width, color = 'b', linewidth = 0, edgecolor = 'black',
alpha = 0.5, bottom = ttbar+dy, label = r'ZZ $\rightarrow$ 4l')
# Measurements.
data_bar = plt.errorbar(center, hist, xerr = xerrs, yerr = yerrs, linestyle = 'None', color = 'black',
marker = 'o', label = 'Data')
plt.title('$ \sqrt{s} = 7$ TeV, L = 2.3 $fb^{-1}$; $\sqrt{s} = 8$ TeV, L = 11.6 $fb^{-1}$ \n', fontsize = 12)
plt.xlabel('$m_{4l}$ (GeV)', fontsize = 15)
plt.ylabel('Events / 3 GeV\n', fontsize = 15)
plt.ylim(0,25)
plt.xlim(rmin,rmax)
plt.legend(fontsize = 15)
plt.show()
# HZZ, theoretical expectation for the Higgs boson decay products assuming a mass of 125 GeV
plt.figure(figsize = (15,3))
plt.bar(center, hzz, align = 'center', width = width, color = 'w', linewidth = 1, edgecolor = 'r')
plt.xlabel('4 leptons - invariant mass (GeV)', fontsize = 15)
plt.ylabel('Events / 3 GeV\n', fontsize = 15)
plt.xlim(rmin,rmax)
plt.show()
plt.figure(figsize = (15,5))
# ttbar
ttbar_bar = plt.bar(center, ttbar, align = 'center', width = width, color = 'gray', linewidth = 0, edgecolor = 'b',
alpha = 0.5, label = r'$t\bar{t}$')
# DY
dy_bar = plt.bar(center, dy, align = 'center', width = width, color = 'g', linewidth = 0, edgecolor = 'black',
alpha = 0.5, bottom = ttbar, label = 'Z/$\gamma^{*}$ + X')
# ZZ
zz_bar = plt.bar(center, zz, align = 'center', width = width, color = 'b', linewidth = 0, edgecolor = 'black',
alpha = 0.5, bottom = ttbar+dy, label = r'ZZ $\rightarrow$ 4l')
# HZZ
hzz_bar = plt.bar(center, hzz, align = 'center', width = width, color = 'w', linewidth = 1, edgecolor = 'r',
bottom = ttbar+dy+zz, label = '$m_{H}$ = 125 GeV')
# Measurements.
data_bar = plt.errorbar(center, hist, xerr = xerrs, yerr = yerrs, linestyle = 'None', color = 'black',
marker = 'o', label = 'Data')
plt.title('$ \sqrt{s} = 7$ TeV, L = 2.3 $fb^{-1}$; $\sqrt{s} = 8$ TeV, L = 11.6 $fb^{-1}$ \n', fontsize = 12)
plt.xlabel('$m_{4 leptons}$ (GeV)', fontsize = 15)
plt.ylabel('Events / 3 GeV\n', fontsize = 15)
plt.ylim(0,25)
plt.xlim(rmin,rmax)
plt.legend(fontsize = 15)
plt.show()
# To inspect the data more closely, information on all four-particle observations is available
pd.options.display.max_columns = 50
fourlep.head()
# Scikit-Learn LogisticRegression
Using IRIS_VIEW from DWC. This view has 150 records.
## Install fedml_azure package
```
pip install fedml_azure-1.0.0-py3-none-any.whl --force-reinstall
```
## Import the libraries needed in this notebook
```
from fedml_azure import create_workspace
from fedml_azure import create_compute
from fedml_azure import create_environment
from fedml_azure import DwcAzureTrain
```
## Set up
### Creating a workspace. This takes a dictionary as input for parameter workspace_args.
Before running the below cell, ensure that you have a workspace and replace the subscription_id, resource_group, and workspace_name with your information.
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-manage-workspace?tabs=python
```
#creation of workspace
workspace=create_workspace(workspace_args={"subscription_id": "cb97564e-cea8-45a4-9c5c-a3357e8f7ee4",
"resource_group": "Sample2_AzureML_Resource",
"workspace_name": "Sample2_AzureML_Worskpace"
}
)
```
### Creating a Compute Cluster. This takes the workspace, a compute_type, and compute_args.
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-attach-compute-cluster?tabs=python
https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.compute.amlcompute.amlcompute?view=azure-ml-py
```
#creation of compute target
compute=create_compute(workspace=workspace,
compute_type='AmlComputeInstance',
compute_args={
'vm_size':"Standard_D3_v2",
'compute_name':'cpu-clu-log'
}
)
```
### Creating an Environment. This takes the workspace, environment_type, and environment_args.
The whl file for the fedml_azure library must be passed to the pip_wheel_files key in the environment_args.
In this example, we are using a .yml for the environments dependencies and passing the file path to environment_arg's file_path key.
https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.environment(class)?view=azure-ml-py
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-environments
```
#creation of environment
environment=create_environment(workspace=workspace,
environment_type='CondaSpecificationEnvironment',
environment_args={'name':'test-env-log',
'file_path': 'conda_dependency.yml',
'pip_wheel_files':['fedml_azure-1.0.0-py3-none-any.whl']})
```
## Now, let's train the model
### First, we need to instantiate the training class - this will assign the resources.
```
train=DwcAzureTrain(workspace=workspace,
environment=environment,
experiment_args={'name':'test-2'},
compute=compute)
```
### Then, we need to generate the run config. This is needed to package the configuration specified so we can submit a job for training.
Before running the following cell, you should have a config.json file with the values required to access DWC. Provide this file path to config_file_path in the cell below.
You should also have the view IRIS_VIEW created in your DWC. To gather this data, please refer to https://www.kaggle.com/uciml/iris
https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.scriptrunconfig?view=azure-ml-py
```
#generating the run config
src=train.generate_run_config(config_file_path='dwc_configs/config.json',
config_args={
'source_directory':'Scikit-Learn-Logistic-Regression',
'script':'iris_train.py',
'arguments':[
'--model_file_name','regression.pkl',
'--table_name', 'IRIS_VIEW',
'--table_size', '1']
}
)
```
### Submitting the job for training
```
#submitting the training run
run=train.submit_run(src)
```
## Register the model for deployment
```
model=train.register_model(run=run,
model_args={'model_name':'sklearn_logReg_model',
'model_path':'outputs/regression.pkl'},
resource_config_args={'cpu':1, 'memory_in_gb':0.5},
is_sklearn_model=True
)
print('Name:', model.name)
print('Version:', model.version)
```
# Using distributions
`uravu` isn't limited to using normally distributed ordinate values.
Any distribution of ordinate values can be used, as `uravu` will perform a Gaussian [kernel density estimation](https://en.wikipedia.org/wiki/Kernel_density_estimation) on the samples to determine a description for the distribution.
This is most easily shown in action: imagine we have some experimental data that is distributed with a skew normal distribution rather than the typical normal distribution.
So the values for a particular $y$-value take the (peculiar) shape,
```
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import skewnorm
from uravu.distribution import Distribution
from uravu.relationship import Relationship
from uravu.utils import straight_line
from uravu import plotting
np.random.seed(2)
y = skewnorm(10, 0, 0.1)
plt.hist(y.rvs(size=5000), bins=25)
plt.show()
```
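As an aside, the kernel density estimation step can be illustrated directly. The sketch below uses `scipy.stats.gaussian_kde` as a stand-in for whatever estimator `uravu` applies internally, so treat it purely as an illustration (it reuses the skew normal `y` and the imports from the cell above).
```
from scipy.stats import gaussian_kde

samples = y.rvs(size=5000)              # draw samples from the skew normal defined above
kde = gaussian_kde(samples)             # Gaussian kernel density estimate of the samples
grid = np.linspace(samples.min(), samples.max(), 200)
plt.plot(grid, kde(grid))
plt.xlabel('y')
plt.ylabel('estimated density')
plt.show()
```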
Let's build some synthetic data, collected by sampling some skew normal distribution across a series of values of $x$.
```
x = np.linspace(1, 100, 10)
Y = []
for i in x:
Y.append(Distribution(skewnorm.rvs(10, i*3.14, i*0.5, size=5000)+(1*np.random.randn())))
```
Note that the samples, in this case a series of random values from the distribution, are passed to the `uravu.distribution.Distribution` object, and the resulting objects are stored as a list of `Distribution` objects.
This `list` is passed to the `Relationship` class as shown below (note the `ordinate_error` keyword argument is no longer used as we are describing the distribution directly in the `Y` variable),
```
r = Relationship(straight_line, x, Y, bounds=((-10, 10), (-10, 10)))
r.max_likelihood('diff_evo')
r.variable_medians
plotting.plot_relationship(r)
plt.show()
```
It is then possible to use the standard sampling methods to investigate the distribution of the model parameters, here the model is a simple straight line relationship.
```
r.mcmc()
```
Above [Markov chain Monte Carlo](./mcmc.html) is used to sample the distribution of the gradient and intercept of the straight line, given the uncertainties in the ordinate values (from the distributions).
These distributions can be visualised with the `plot_distribution` from the `uravu.plotting` library.
```
plotting.plot_distribution(r.variables[0])
plt.show()
plotting.plot_distribution(r.variables[1])
plt.show()
```
We can also see how these distributions affect the agreement with the data by plotting the relationship.
```
plotting.plot_relationship(r)
plt.show()
```
# Perceptron
Implementation of the classic Perceptron by Frank Rosenblatt for binary classification.
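For reference, the learning rule implemented in the `train` method below updates the weights and bias only when a sample is misclassified (the error $y - \hat{y}$ is zero otherwise):
$$
w \leftarrow w + (y - \hat{y})\,x, \qquad b \leftarrow b + (y - \hat{y})
$$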
### Imports
```
import numpy as np
import matplotlib.pyplot as plt
import torch
%matplotlib inline
```
### Dataset
```
data = np.genfromtxt("./data/data.txt", delimiter="\t")
X, y = data[:, :2], data[:, 2]
y = y.astype("int")
print("Class counts:", np.bincount((y)))
print("X shape", X.shape)
print("y shape", y.shape)
# Shuffling & train/test split
shuffle_idx = np.arange(y.shape[0])
shuffle_rng = np.random.RandomState(1)
shuffle_rng.shuffle(shuffle_idx)
X, y = X[shuffle_idx], y[shuffle_idx]
X_train, X_test = X[shuffle_idx[:70]], X[shuffle_idx[70:]]
y_train, y_test = y[shuffle_idx[:70]], y[shuffle_idx[70:]]
# Normalize
mu, sigma = X_train.mean(axis=0), X_train.std(axis=0)
X_train = (X_train - mu) / sigma
X_test = (X_test - mu) / sigma
plt.scatter(X_train[y_train==0, 0], X_train[y_train==0, 1], label='class 0', marker='o')
plt.scatter(X_train[y_train==1, 0], X_train[y_train==1, 1], label='class 1', marker='s')
plt.xlabel('feature 1')
plt.ylabel('feature 2')
plt.legend()
plt.show()
```
### Model
```
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Perceptron():
def __init__(self, num_features):
self.num_features = num_features
self.weights = torch.zeros(num_features, 1,
dtype=torch.float32, device=device)
self.bias = torch.zeros(1, dtype=torch.float32, device=device)
def forward(self, x):
linear = torch.add(torch.mm(x, self.weights), self.bias)
predictions = np.where(linear > 0., 1, 0)
return predictions
def backward(self, x, y):
predictions = self.forward(x)
errors = y - predictions
return errors
def train(self, x, y, epochs):
for e in range(epochs):
for i in range(y.size()[0]):
errors = self.backward(x[i].view(1, self.num_features), y[i]).view(-1)
self.weights += (errors * x[i]).view(self.num_features, 1)
self.bias += errors
def evaluate(self, x, y):
predictions = self.forward(x)
accuracy = torch.sum(torch.tensor(predictions).view(-1) == y) / y.shape[0]
return accuracy
```
### Train
```
model = Perceptron(num_features=2)
X_train_tensor = torch.tensor(X_train, dtype=torch.float32, device=device)
y_train_tensor = torch.tensor(y_train, dtype=torch.float32, device=device)
model.train(X_train_tensor, y_train_tensor, epochs=5)
print('Model parameters:')
print('Weights: %s' % model.weights)
print('Bias: %s' % model.bias)
```
### Test
```
X_test_tensor = torch.tensor(X_test, dtype=torch.float32, device=device)
y_test_tensor = torch.tensor(y_test, dtype=torch.float32, device=device)
test_acc = model.evaluate(X_test_tensor, y_test_tensor)
print('Test set accuracy: %.2f%%' % (test_acc*100))
```
# Problem Set 04 - Hypothesis Testing
```
# Set the seed for random number generation
# Do not generate more random numbers than necessary, or the automated grading will fail
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(2019104)
```
# Exercise 01:
At a certain university, a survey was conducted with the goal of collecting answers from students of all major fields of knowledge. Among the 1000 respondents, each field had the following number of students:
* Exact sciences: 100
* Humanities: 600
* Biological sciences: 300
However, the quality of the sample was questioned. It was argued that the proportion of Humanities students surveyed was much larger than the proportion of students of that field in the university as a whole. The university's records list:
* 2000 Exact sciences students
* 8000 Humanities students
* 2500 Biological sciences students
A) Return a list of True or False according to the statements below:
1: Regarding the null hypothesis, we can perform tests that allow us to accept it as true.
2: We can define the following null hypothesis: "The sample used in the survey was not generated uniformly at random".
3: We can define the following null hypothesis: "The sample used in the survey was generated uniformly at random".
Example:
```python
def q1a():
return [ False, False, False ]
```
```
def q1a():
return [True, False, True]
```
B) To compare the distributions, implement the Total Variation Distance metric.
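For reference, the Total Variation Distance between two discrete distributions $p$ and $q$ is
$$
\mathrm{TVD}(p, q) = \frac{1}{2}\sum_{i} \left| p_i - q_i \right|.
$$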
```
def tvd(p, q):
return sum([abs(p[i]-q[i]) for i in range(len(p))])/2.0
from numpy.testing import *
pop = np.array([0.15, 0.18, 0.12, 0.54, 0.01])
samp = np.array([0.26, 0.08, 0.08, 0.54, 0.04])
assert_equal( tvd(pop,samp), 0.14 )
```
C) Compute the TVD of the sample used in the survey
__Hint:__ The Total Variation Distance is a distance metric between probability distributions, not between absolute quantities.
```
amostra = [100.0/1000.0, 600.0/1000.0, 300.0/1000.0]
populacao = [2000.0/12500.0, 8000.0/12500.0, 2500.0/12500.0]
def q1c():
return tvd(amostra, populacao)
```
D) Implement a function that simulates a uniformly random sample, returning a list of proportions for each category (Exact sciences, Humanities, Biological sciences).
Example:
```python
def gera_amostra(tamanho_amostra):
return [0.6, 0.0, 0.4]
```
__Hint:__ create a list for the population (indicating each student's field of knowledge by 0, 1 or 2, for example) and use the ```numpy.random.shuffle``` function to shuffle the data. Then take the first elements of the list to form the sample, and compute the proportions from that.
```
def gera_amostra(tamanho_amostra):
pop = [0]*2000 + [1]*8000 + [2]*2500
np.random.shuffle(pop)
amost = pop[:tamanho_amostra]
proporcoes = [amost.count(0)/tamanho_amostra, amost.count(1)/tamanho_amostra, amost.count(2)/tamanho_amostra]
return proporcoes
```
E) Generate 10000 samples and plot the distribution of the TVD values. Add a point to the plot showing the TVD of the survey sample.
```
populacao = [2000.0/12500.0, 8000.0/12500.0, 2500.0/12500.0]
N = 10000
tamanhoAmostra = 1000
tvds = []
for _ in range(N):
proporcaoAmostra = gera_amostra(tamanhoAmostra)
tvds.append(tvd(proporcaoAmostra, populacao))
plt.scatter(q1c(), 10, color="red", alpha=1)
plt.hist(tvds, bins=15)
plt.xlabel("TVD")
plt.ylabel("# Simulações")
```
F) Write a function that returns the TVD value beyond which we are no longer within the 90% confidence interval
```
def q1f():
return np.percentile(tvds, 90.0)
```
# Exercise 02:
Using the data from the ```baby.csv``` file, we will perform a permutation test.
```
import pandas as pd
df = pd.read_csv('baby.csv')
# Converting to non-US units
df['Birth Weight'] = 0.0283495 * df['Birth Weight']
df['Maternal Pregnancy Weight'] = 0.0283495 * df['Maternal Pregnancy Weight']
df['Maternal Height'] = 0.0254 * df['Maternal Height']
df.head()
```
A) Implement a function that returns the difference between the mean birth weight of babies for smoking mothers and the mean for non-smoking mothers. Example:
```python
def q2a(df):
media_fumantes = 3.1
media_nao_fumantes = 3.8
return -0.7
```
```
def q2a(df):
fumantes = df[df['Maternal Smoker'] == True]
naoFumantes = df[df['Maternal Smoker'] == False]
return fumantes['Birth Weight'].mean() - naoFumantes['Birth Weight'].mean()
```
B) Perform permutations of the labels (smoking / non-smoking mothers) with sizes ```[10,100,500,1000,5000]```. Plot graphs showing the distribution of the differences between the mean birth weights as the number of permutations increases. That is, one plot for 10 permutations, one for 100, and so on.
As an example, the image below shows the plot for 100 permutations.
```
from IPython.display import Image
Image('100permutacoes.png')
fumantes = df['Maternal Smoker'] == True
diferencaReal = q2a(df)
for tamanhoPermutacao in [10, 100, 500, 1000, 5000]:
diferencas = []
for _ in range(tamanhoPermutacao):
copiaFumantes = fumantes
np.random.shuffle(copiaFumantes.values)
diferencas.append(df[copiaFumantes]['Birth Weight'].mean() - df[~copiaFumantes]['Birth Weight'].mean())
    # Plot the distribution of the differences
    plt.title(str(tamanhoPermutacao) + " Permutations")
    plt.xlabel("Permutation difference")
plt.ylabel("Pr(diff)")
plt.vlines(diferencaReal, ymin=0.0, ymax=20, color="red")
plt.hist(diferencas, density=True, bins=50)
plt.show()
```
C) Write below what you observed in the plots.
Following the Central Limit Theorem, as we increase the number of permutations, the distribution of the difference of the means looks more and more like a normal distribution.
In all the plots we can see that the real difference between smoking and non-smoking mothers is an outlier (with significance >= 99%), which allows us to reject the null hypothesis that this difference is due to chance.
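As a small addition beyond the assignment (a sketch only, reusing `diferencas` and `diferencaReal` from the cell above), the visual impression can be quantified with an empirical p-value:
```
import numpy as np

# Fraction of permuted differences at least as extreme as the observed difference
# (uses the permutations from the last iteration of the loop above).
p_value = np.mean(np.abs(np.array(diferencas)) >= abs(diferencaReal))
print('Empirical p-value:', p_value)
```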
# Text Processing
## Capturing Text Data
### Plain Text
```
import os
# Read in a plain text file
with open(os.path.join("data", "hieroglyph.txt"), "r") as f:
text = f.read()
print(text)
```
### Tabular Data
```
import pandas as pd
# Extract text column from a dataframe
df = pd.read_csv(os.path.join("data", "news.csv"))
df.head()[['publisher', 'title']]
# Convert text column to lowercase
df['title'] = df['title'].str.lower()
df.head()[['publisher', 'title']]
```
### Online Resource
```
import requests
import json
# Fetch data from a REST API
r = requests.get(
"https://quotes.rest/qod.json")
res = r.json()
print(json.dumps(res, indent=4))
# Extract relevant object and field
q = res["contents"]["quotes"][0]
print(q["quote"], "\n--", q["author"])
```
## Cleaning
```
import requests
# Fetch a web page
r = requests.get("https://news.ycombinator.com")
print(r.text)
import re
# Remove HTML tags using RegEx
pattern = re.compile(r'<.*?>') # tags look like <...>
print(pattern.sub('', r.text)) # replace them with blank
from bs4 import BeautifulSoup
# Remove HTML tags using Beautiful Soup library
soup = BeautifulSoup(r.text, "html5lib")
print(soup.get_text())
# Find all articles
summaries = soup.find_all("tr", class_="athing")
summaries[0]
# Extract title
summaries[0].find("a", class_="storylink").get_text().strip()
# Find all articles, extract titles
articles = []
summaries = soup.find_all("tr", class_="athing")
for summary in summaries:
title = summary.find("a", class_="storylink").get_text().strip()
articles.append((title))
print(len(articles), "Article summaries found. Sample:")
print(articles[0])
```
## Normalization
### Case Normalization
```
# Sample text
text = "The first time you see The Second Renaissance it may look boring. Look at it at least twice and definitely watch part 2. It will change your view of the matrix. Are the human people the ones who started the war ? Is AI a bad thing ?"
print(text)
# Convert to lowercase
text = text.lower()
print(text)
```
### Punctuation Removal
```
import re
# Remove punctuation characters
text = re.sub(r"[^a-zA-Z0-9]", " ", text)
print(text)
```
## Tokenization
```
# Split text into tokens (words)
words = text.split()
print(words)
```
### NLTK: Natural Language ToolKit
```
import os
import nltk
nltk.data.path.append(os.path.join(os.getcwd(), "nltk_data"))
# Another sample text
text = "Dr. Smith graduated from the University of Washington. He later started an analytics firm called Lux, which catered to enterprise customers."
print(text)
from nltk.tokenize import word_tokenize
# Split text into words using NLTK
words = word_tokenize(text)
print(words)
from nltk.tokenize import sent_tokenize
# Split text into sentences
sentences = sent_tokenize(text)
print(sentences)
# List stop words
from nltk.corpus import stopwords
print(stopwords.words("english"))
# Reset text
text = "The first time you see The Second Renaissance it may look boring. Look at it at least twice and definitely watch part 2. It will change your view of the matrix. Are the human people the ones who started the war ? Is AI a bad thing ?"
# Normalize it
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
# Tokenize it
words = text.split()
print(words)
# Remove stop words
words = [w for w in words if w not in stopwords.words("english")]
print(words)
```
### Sentence Parsing
```
import nltk
# Define a custom grammar
my_grammar = nltk.CFG.fromstring("""
S -> NP VP
PP -> P NP
NP -> Det N | Det N PP | 'I'
VP -> V NP | VP PP
Det -> 'an' | 'my'
N -> 'elephant' | 'pajamas'
V -> 'shot'
P -> 'in'
""")
parser = nltk.ChartParser(my_grammar)
# Parse a sentence
sentence = word_tokenize("I shot an elephant in my pajamas")
for tree in parser.parse(sentence):
print(tree)
```
## Stemming & Lemmatization
### Stemming
```
from nltk.stem.porter import PorterStemmer
# Reduce words to their stems
stemmed = [PorterStemmer().stem(w) for w in words]
print(stemmed)
```
### Lemmatization
```
from nltk.stem.wordnet import WordNetLemmatizer
# Reduce words to their root form
lemmed = [WordNetLemmatizer().lemmatize(w) for w in words]
print(lemmed)
# Lemmatize verbs by specifying pos
lemmed = [WordNetLemmatizer().lemmatize(w, pos='v') for w in lemmed]
print(lemmed)
```
# Standardized Groundwater Index (SGI)
*R.A. Collenteur, University of Graz, November 2020*
To study the occurrence of groundwater droughts, [Bloomfield and Marchant (2013)](#References) developed the Standardized Groundwater Index (SGI). The SGI is increasingly used to study and quantify groundwater droughts. This notebook shows how to compute the SGI using Pastas, and how Pastas models may be used to obtain groundwater level time series with regular time steps. The SGI implemented in Pastas (`ps.stats.sgi`) is based on the description in [Bloomfield and Marchant (2013)](#References).
The SGI requires regular time steps between groundwater level observations, while historical groundwater level time series are often characterized by irregular time intervals between observations. To overcome this issue, [Marchant and Bloomfield (2018)](#References) applied time series models using impulse response functions to simulate groundwater level time series at a regular time interval. Here, this methodology is extended by using evaporation and precipitation as model input and using a nonlinear recharge model ([Collenteur et al. (2021)](#References)) to compute groundwater recharge and finally groundwater levels.
**Note that this notebook is meant as an example of how Pastas models may be used to support studies computing SGI values, and not as a guide on how to compute or interpret the SGI values.**
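As a rough illustration only (this is a simplified sketch and not the actual `ps.stats.sgi` implementation, which may for instance treat calendar months separately), the normal-scores transform behind the SGI maps each observation to the standard-normal quantile of its empirical non-exceedance probability:
```
from scipy.stats import norm, rankdata

def sgi_sketch(values):
    # Rank the values, convert the ranks to plotting positions in (0, 1),
    # and map them through the standard-normal quantile function.
    ranks = rankdata(values)
    p = (ranks - 0.5) / len(values)
    return norm.ppf(p)
```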
```
import pandas as pd
import pastas as ps
import matplotlib.pyplot as plt
ps.set_log_level("ERROR")
ps.show_versions()
```
## The first example model
### 1. Loading the data
In this example we model the groundwater levels for a monitoring well (B32C0639, filter 1) near the town "de Bilt" in the Netherlands. Precipitation and evaporation are available from the nearby meteorological station of the KNMI. The groundwater level observations have irregular time steps.
```
# Load input data
head = pd.read_csv("../data/B32C0639001.csv", parse_dates=['date'],
index_col='date', squeeze=True)
evap = ps.read_knmi("../data/etmgeg_260.txt", variables="EV24").series * 1e3
rain = ps.read_knmi("../data/etmgeg_260.txt", variables="RH").series * 1e3
# Plot input data
fig, axes = plt.subplots(3,1, figsize=(10,6), sharex=True)
head.plot(ax=axes[0], x_compat=True, linestyle=" ", marker=".")
evap.plot(ax=axes[1], x_compat=True)
rain.plot(ax=axes[2], x_compat=True)
axes[0].set_ylabel("Head [m]")
axes[1].set_ylabel("Evap [mm/d]")
axes[2].set_ylabel("Rain [mm/d]")
plt.xlim("1982", "2005");
```
### 2. Creating and calibrating the model
We now create a simple time series model using a parsimonious non-linear recharge model to translate precipitation and evaporation into groundwater recharge. The recharge flux is then convoluted with an exponential response function to compute the contribution of the recharge to the groundwater level fluctuations. The results are plotted below.
```
# Create the basic Pastas model
ml = ps.Model(head)
# Add a recharge model
rch = ps.rch.FlexModel()
rm = ps.RechargeModel(rain, evap, recharge=rch, rfunc=ps.Exponential, name="rch")
ml.add_stressmodel(rm)
# Solve the model
ml.solve(noise=True, tmin="1990", report=False)
ml.plots.results(figsize=(10, 6));
```
### 3. Computing and visualizing the SGI
The plot above shows that we have a pretty good model fit with the data. This is particularly important when we want to compute the SGI using the simulated time series. We now compute the SGI and show the models results and estimated SGI in one figure. A possible extension to the SGI computation below is to take the uncertainty of the groundwater level simulation into account, as is done by [Marchant and Bloomfield (2018)](#References).
```
# Compute the SGI
sim = ml.simulate(tmin="1990")
sgi = ps.stats.sgi(sim.resample("W").mean())
ci = ml.fit.prediction_interval(n=10)
# Make the plot
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(10,5), sharex=True)
# Upper subplot
sim.plot(ax=ax1, zorder=10)
ml.oseries.series.plot(ax=ax1, linestyle=" ", marker=".", color="k")
ax1.fill_between(ci.index, ci.iloc[:,0], ci.iloc[:,1], color="gray")
ax1.legend(["Simulation", "Observations", "Prediction interval"], ncol=3)
# Lower subplot
sgi.plot(ax=ax2, color="k")
ax2.axhline(0, linestyle="--", color="k")
droughts = sgi.to_numpy(copy=True)
droughts[droughts > 0] = 0
ax2.fill_between(sgi.index, 0, droughts, color="C0")
# Dress up the plot
ax1.set_ylabel("GWL [m]")
ax1.set_title("Groundwater levels")
ax2.set_ylabel("SGI [-]")
ax2.set_title("Standardized Groundwater Index")
```
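As an aside, a rough and untested sketch of the extension mentioned above is to apply the same transform to the bounds of the prediction interval computed in the previous cell; treat this purely as an illustration:
```
# Sketch only: SGI of the lower and upper prediction-interval bounds,
# reusing `ci` and `ps.stats.sgi` from the cells above.
sgi_lower = ps.stats.sgi(ci.iloc[:, 0].resample("W").mean())
sgi_upper = ps.stats.sgi(ci.iloc[:, 1].resample("W").mean())
```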
## Second Example: data with trends
For the example above, precipitation and evaporation were sufficient to accurately simulate the groundwater levels. Now we look at an example where this is not the case. The groundwater levels are again observed near the town of de Bilt in the Netherlands. The time series has more irregularities in the time steps between observations and ends with high-frequency observations.
### 1. Create a simple model
```
# Loads heads and create Pastas model
head2 = ps.read_dino("../data/B32C0609001_1.csv")
ml2 = ps.Model(head2)
# Add a recharge model
rch = ps.rch.FlexModel()
rm = ps.RechargeModel(rain, evap, recharge=rch, rfunc=ps.Exponential, name="rch")
ml2.add_stressmodel(rm)
# Solve and plot the model
ml2.solve(noise=True, tmin="1990", report=False)
ml2.plots.results(figsize=(10, 6));
```
### 2. Add linear trend
Clearly the model fit with the data in the above figure is not so good. Looking at the model residuals (simulation - observation) we can observe a steady upward trend in the residuals. Let's try and add a linear trend to the model to improve the groundwater level simulation.
```
# Add a linear trend
tm = ps.LinearTrend("1990", "2020", name="trend")
ml2.add_stressmodel(tm)
# Solve the model
#ml2.solve(noise=False, tmin="1990", report=False) # Get better initial estimated first
ml2.solve(noise=True, tmin="1990", report=False)
ml2.plots.results(figsize=(10, 6));
```
### 3. Computing and plotting the SGI
The model fit for the model above looks a lot better. Now we can compute and plot the SGI again as we did before.
```
# Compute the SGI
sim = ml2.simulate(tmin="1990")
sgi = ps.stats.sgi(sim.resample("W").mean())
ci = ml2.fit.prediction_interval(n=10)
# Make the plot
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(10,5), sharex=True)
# Upper subplot
sim.plot(ax=ax1, zorder=10)
ml2.oseries.series.plot(ax=ax1, linestyle=" ", marker=".", color="k")
ax1.fill_between(ci.index, ci.iloc[:,0], ci.iloc[:,1], color="gray")
ax1.legend(["Simulation", "Observations", "Prediction interval"], ncol=3)
# Lower subplot
sgi.plot(ax=ax2, color="k")
ax2.axhline(0, linestyle="--", color="k")
droughts = sgi.to_numpy(copy=True)
droughts[droughts > 0] = 0
ax2.fill_between(sgi.index, 0, droughts, color="C0")
# Dress up the plot
ax1.set_ylabel("GWL [m]")
ax1.set_title("Groundwater levels")
ax2.set_ylabel("SGI [-]")
ax2.set_title("Standardized Groundwater Index");
```
## What about human influenced groundwater systems?
Let's explore the possibilities of using the Pastas framework a bit here. The first example showed SGI values for a system under natural conditions, with only recharge being enough to explain the groundwater level fluctuations. In the second example a small linear trend had to be added, without explicit knowledge of what may have caused this trend. In this third and final example we consider an aquifer system that is influenced by groundwater pumping.
The question we want to answer is how the SGI values may have looked without groundwater pumping (a natural system) and compare these to the SGI values with groundwater pumping. We can see clearly from the model that groundwater pumping decreased the groundwater levels, but how does it impact the SGI values?
```
# Load input data
head = pd.read_csv("data_notebook_9/head.csv", parse_dates=True, index_col=0, squeeze=True)
prec = pd.read_csv("data_notebook_9/prec.csv", parse_dates=True, index_col=0, squeeze=True)
evap = pd.read_csv("data_notebook_9/evap.csv", parse_dates=True, index_col=0, squeeze=True)
well = pd.read_csv("data_notebook_9/well.csv", parse_dates=True, index_col=0, squeeze=True)
# Create the Pastas model
ml3 = ps.Model(head, name="heads")
# Add recharge and a well
sm = ps.RechargeModel(prec, evap, ps.Exponential,
name='rch', recharge=ps.rch.FlexModel())
wm = ps.StressModel(well, ps.Exponential, well.name,
up=False, settings="well")
ml3.add_stressmodel([sm, wm])
# Solve the model
ml3.solve(noise=True, report=False)
ml3.plots.results(figsize=(10, 6));
```
### 2. SGI with and without groundwater pumping
Now that we have a model with a reasonably good fit, we can use the model to separate the effect of groundwater pumping from the effect of recharge. We then compute SGI values on the groundwater levels with and without pumping and compare them visually. The results are shown below, and show very different SGI values as expected.
```
# Compute the SGI
sim = ml3.simulate(tmin="1940")
sgi = ps.stats.sgi(sim.resample("M").mean())
recharge = ml3.get_contribution("rch", tmin="1940")
sgi2 = ps.stats.sgi(recharge.resample("M").mean())
#ci = ml3.fit.prediction_interval()
# Make the plot
fig, [ax1, ax2, ax3] = plt.subplots(3, 1, figsize=(10,6), sharex=True)
sim.plot(ax=ax1, x_compat=True)
(recharge+ml3.get_parameters("constant")).plot(ax=ax1, linestyle="--")
ml3.oseries.series.plot(ax=ax1, linestyle=" ", marker=".", zorder=-1,
markersize=2, color="k", x_compat=True)
#ax1.fill_between(ci.index, ci.iloc[:,0], ci.iloc[:,1], color="gray")
ax1.legend(["Simulation", "Simulation w/o pumping"], ncol=1)
sgi.plot(ax=ax2, color="k", x_compat=True)
ax2.axhline(0, linestyle="--", color="k")
droughts = sgi.to_numpy(copy=True)
droughts[droughts > 0] = 0
ax2.fill_between(sgi.index, 0, droughts, color="C0")
sgi2.plot(ax=ax3, color="k", x_compat=True)
ax3.axhline(0, linestyle="--", color="k")
droughts = sgi2.to_numpy(copy=True)
droughts[droughts > 0] = 0
ax3.fill_between(sgi2.index, 0, droughts, color="C1")
ax1.set_ylabel("GWL [m]")
ax1.set_title("Groundwater levels")
ax2.set_ylabel("SGI [-]")
ax2.set_title("SGI With Groundwater pumping")
ax3.set_ylabel("SGI [-]")
ax3.set_title("SGI under 'Natural conditions'")
plt.xlim("1940", "2016");
```
## References
- Bloomfield, J. P. and Marchant, B. P.: [Analysis of groundwater drought building on the standardised precipitation index approach](https://hess.copernicus.org/articles/17/4769/2013/), Hydrol. Earth Syst. Sci., 17, 4769–4787, 2013.
- Marchant, B. and Bloomfield, J.: [Spatio-temporal modelling of the status of groundwater droughts](https://doi.org/10.1016/j.jhydrol.2018.07.009), J. Hydrol., 564, 397–413, 2018
- Collenteur, R., Bakker, M., Klammler, G., and Birk, S. (2021) [Estimation of groundwater recharge from groundwater levels using nonlinear transfer function noise models and comparison to lysimeter data](https://doi.org/10.5194/hess-2020-392), Hydrol. Earth Syst. Sci., 25, 2931–2949.
## Data Sources
- The precipitation and evaporation time series are taken from the Dutch KNMI, meteorological station "de Bilt" (www.knmi.nl).
- The groundwater level time series were downloaded from Dinoloket (www.dinloket.nl).
# Object Detection with SSD
### Here we demonstrate detection on example images using SSD with PyTorch
```
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import numpy as np
import cv2
if torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
from ssd import build_ssd
```
## Build SSD300 in Test Phase
1. Build the architecture, specifying the size of the input image (300),
and number of object classes to score (21 for VOC dataset)
2. Next we load pretrained weights on the VOC0712 trainval dataset
```
net = build_ssd('test', 300, 21) # initialize SSD
net.load_weights('../weights/ssd300_mAP_77.43_v2.pth')
```
## Load Image
### Here we just load a sample image from the VOC07 dataset
```
# image = cv2.imread('./data/example.jpg', cv2.IMREAD_COLOR) # uncomment if dataset not downloaded
%matplotlib inline
from matplotlib import pyplot as plt
from data import VOCDetection, VOC_ROOT, VOCAnnotationTransform
# here we specify year (07 or 12) and dataset ('test', 'val', 'train')
testset = VOCDetection(VOC_ROOT, [('2007', 'val')], None, VOCAnnotationTransform())
img_id = 60
image = testset.pull_image(img_id)
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# View the sampled input image before transform
plt.figure(figsize=(10,10))
plt.imshow(rgb_image)
plt.show()
```
## Pre-process the input.
#### Using the torchvision package, we can create a Compose of multiple built-in transform ops to apply
For SSD, at test time we use a custom BaseTransform callable to
resize our image to 300x300, subtract the dataset's mean rgb values,
and swap the color channels for input to SSD300.
```
x = cv2.resize(image, (300, 300)).astype(np.float32)
x -= (104.0, 117.0, 123.0)
x = x.astype(np.float32)
x = x[:, :, ::-1].copy()
plt.imshow(x)
x = torch.from_numpy(x).permute(2, 0, 1)
```
## SSD Forward Pass
### Now just wrap the image in a Variable so it is recognized by PyTorch autograd
```
xx = Variable(x.unsqueeze(0)) # wrap tensor in Variable
if torch.cuda.is_available():
xx = xx.cuda()
y = net(xx)
```
## Parse the Detections and View Results
Filter outputs with confidence scores lower than a threshold; here we choose 60%.
```
from data import VOC_CLASSES as labels
top_k=10
plt.figure(figsize=(10,10))
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
plt.imshow(rgb_image) # plot the image for matplotlib
currentAxis = plt.gca()
detections = y.data
# scale each detection back up to the image
scale = torch.Tensor(rgb_image.shape[1::-1]).repeat(2)
for i in range(detections.size(1)):
j = 0
while detections[0,i,j,0] >= 0.6:
score = detections[0,i,j,0]
label_name = labels[i-1]
display_txt = '%s: %.2f'%(label_name, score)
pt = (detections[0,i,j,1:]*scale).cpu().numpy()
coords = (pt[0], pt[1]), pt[2]-pt[0]+1, pt[3]-pt[1]+1
color = colors[i]
currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor':color, 'alpha':0.5})
j+=1
```
# Effect of underresolved meshes on species boundary layers
This notebook highlights the effect of low mesh resolution on diffusive fluxes, convective fluxes, and source terms due to reactions in a standard finite volume discretization of reactive species boundary layers. To obtain realistic profiles, a coupled system of 1D reaction-diffusion equations is solved.
## Dependencies
This notebook has no dependencies on other notebooks or on the data set.
## Mathematical problem
We are looking at a single reaction of type $2A+B\rightarrow P$, where the transfer species $A$ reacts with some bulk component $B$ to the desired product $P$. The reaction-diffusion equations for such a system read
$$
\frac{\mathrm{d}^2 c_A}{\mathrm{d}x^2} = 2k c_A^2 c_B\\
\frac{\mathrm{d}^2 c_B}{\mathrm{d}x^2} = k c_A^2 c_B\\
\frac{\mathrm{d}^2 c_P}{\mathrm{d}x^2} = -k c_A^2 c_B
$$
where $k$ denotes the reaction rate constant. The boundary conditions for $A$ for a domain of length $L$ are $c_A|_{x=0} = 1$ and $c_A|_{x=L} = 0$. The boundary at $x=0$ may be considered as the gas-liquid interface. For species $B$, the gradient at $x=0$ is set to zero, i.e. no flux of $B$ passes the interface, and at $x=L$ a fixed value is applied, i.e. the bulk is always saturated with $B$. Expressed in mathematical terms, the same conditions read
$$
\left.\frac{\mathrm{d} c_B}{\mathrm{d}x}\right\vert_{x=0} = 0\quad \text{and} \quad c_B|_{x=L} = 1.
$$
For the product species, at both boundaries the concentration value is set to zero. For $P$ there are several sensible boundary conditions, but the chosen ones are sufficient to create a realistic profile.
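For the numerical solution below, the three second-order equations are rewritten as a first-order system for the state vector $c = (c_A, c_A^\prime, c_B, c_B^\prime, c_P, c_P^\prime)$, which is the form expected by `scipy.integrate.solve_bvp` and exactly what the function `func` below implements:
$$
\frac{\mathrm{d}}{\mathrm{d}x}
\begin{pmatrix} c_A \\ c_A^\prime \\ c_B \\ c_B^\prime \\ c_P \\ c_P^\prime \end{pmatrix}
=
\begin{pmatrix} c_A^\prime \\ 2 k c_A^2 c_B \\ c_B^\prime \\ k c_A^2 c_B \\ c_P^\prime \\ -k c_A^2 c_B \end{pmatrix}
$$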
## Creating an animation from images
Create a video by running
```
ffmpeg -framerate 10 -i base_name_%03d.png -c:v libx264 -vf scale=1320:-2 -pix_fmt yuv420p your_video.mp4
```
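The same conversion can also be triggered from within the notebook. Below is a minimal sketch using `subprocess`, assuming `ffmpeg` is available on the `PATH`; the frame pattern matches the transfer-species frames written further down, and the output file name is just an example.
```
import subprocess
# stitch the saved frames into an MP4 (paths/names are examples)
subprocess.run([
    "ffmpeg", "-framerate", "10",
    "-i", "../output/flux_a/flux_a_%03d.png",
    "-c:v", "libx264", "-vf", "scale=1320:-2", "-pix_fmt", "yuv420p",
    "flux_a.mp4",
], check=True)
```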
## Solving a single boundary value problem using SciPy
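`solve_bvp` expects a first-order system $\mathrm{d}\mathbf{c}/\mathrm{d}x = f(x, \mathbf{c})$, so the three second-order equations are rewritten with the state vector $\mathbf{c} = (c_A, c_A^\prime, c_B, c_B^\prime, c_P, c_P^\prime)$. With the abbreviation $s = k\, c_A^2 c_B$ the system reads
$$
\frac{\mathrm{d}}{\mathrm{d}x}
\begin{pmatrix} c_A \\ c_A^\prime \\ c_B \\ c_B^\prime \\ c_P \\ c_P^\prime \end{pmatrix}
=
\begin{pmatrix} c_A^\prime \\ 2s \\ c_B^\prime \\ s \\ c_P^\prime \\ -s \end{pmatrix},
$$
which is exactly what `func` below returns; the rate constant is hardcoded to $k = 1000$.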
```
import helper_module as hm
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
from scipy.integrate import solve_bvp, trapz
%matplotlib inline
rc('text', usetex=True)
alpha = 0.3
lw = hm.line_width
def func(x, c):
    # c = (c_A, dc_A/dx, c_B, dc_B/dx, c_P, dc_P/dx); source = k*c_A^2*c_B with k = 1000
    source = 1000*np.power(c[0], 2)*c[2]
    return np.vstack((c[1], 2*source, c[3], source, c[5], -source))
def bc(c_a, c_b):
    # residuals of the boundary conditions at x=0 (c_a) and x=L (c_b)
    return np.array([c_a[0] - 1.0, c_a[3], c_a[4], c_b[0], c_b[2] - 1.0, c_b[4]])
x = np.linspace(0, 1.0, 100)
c_init = np.zeros((6, x.size))
res = solve_bvp(func, bc, x, c_init)
x_plot = np.linspace(0, 1, 100)
y_plot_a = res.sol(x_plot)[0] # species A
y_plot_b = res.sol(x_plot)[2] # species B
y_plot_p = res.sol(x_plot)[4] # species P
```
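As a quick sanity check, the three reference profiles can be plotted directly; a minimal sketch reusing the arrays computed above:
```
# visualize the reference concentration profiles of A, B and P
plt.plot(x_plot, y_plot_a, label=r"$c_A$")
plt.plot(x_plot, y_plot_b, label=r"$c_B$")
plt.plot(x_plot, y_plot_p, label=r"$c_P$")
plt.xlabel(r"$x/\delta_c$")
plt.ylabel("concentration")
plt.legend()
plt.show()
```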
## Helper functions
```
def compute_derivative(x, y):
    """Approximate dy/dx with central differences (one-sided stencils at the boundaries)."""
dev_inner = [ (y[i-1] - y[i+1]) / (x[i-1] - x[i+1]) for i in range(1, x.shape[0]-1) ]
dev_0 = [(y[0] - y[1]) / (x[0] - x[1])]
dev_1 = [(y[-2] - y[-1]) / (x[-2] - x[-1])]
return np.asarray(dev_0 + dev_inner + dev_1)
def compute_cell_average(n_cells, n_p, ind):
    """Average component ind of the global reference solution res.sol over cells of width 1/n_cells (n_p samples per cell)."""
cell_width = 1.0/n_cells
fi = [0]
while fi[-1] + cell_width < 1.0:
fi.append(fi[-1] + cell_width)
fi.append(1.0)
y_av = []
for cell in range(0, len(fi)-1):
x_cell = np.linspace(fi[cell], fi[cell+1], n_p)
y_cell = res.sol(x_cell)[ind]
y_av.append(trapz(y_cell, x_cell) / (fi[cell+1] - fi[cell]))
return np.asarray(y_av)
def compute_cell_average_simple(profile, n_cells):
n_points = profile.shape[0]
points_per_cell = int(n_points / n_cells)
left_over_points = n_points % points_per_cell
l_bounds = range(0, n_points - points_per_cell - left_over_points + 1, points_per_cell)
u_bounds = range(points_per_cell, n_points - left_over_points + 1, points_per_cell)
profile_av = []
for lb, ub in zip(l_bounds, u_bounds):
profile_av.append(np.sum(profile[lb:ub]) / points_per_cell)
if left_over_points > 0:
profile_av.append(np.sum(profile[-left_over_points:]) / left_over_points)
return np.asarray(profile_av)
def interpolate_2nn(x, y, x_p):
    """Interpolate y at x_p by inverse-distance weighting of the two closest samples."""
diff = np.absolute(x - x_p)
ind_sort = np.argsort(diff)
w_1 = 1.0 / (diff[ind_sort[0]] + 1.0E-15)
w_2 = 1.0 / (diff[ind_sort[1]] + 1.0E-15)
val = (y[ind_sort[0]] * w_1 + y[ind_sort[1]] * w_2) / (w_1 + w_2)
return val
def compute_discretization_error(x, y, n_cells, ind):
    """Errors of the linear two-point approximations of the interface gradient
    and of the gradient/value at the first inner face w.r.t. the reference solution."""
y_av = compute_cell_average(n_cells, 100, ind)
y_dev = compute_derivative(x, y)
cell_width = 1.0 / n_cells
m_0_lin = (y_av[0] - 1.0) / cell_width * 2
m_1_lin = ( y_av[1] - y_av[0]) / cell_width
val_1_lin = 0.5 * (y_av[0] + y_av[1])
m_1_ref = interpolate_2nn(x, y_dev, cell_width)
val_1_ref = interpolate_2nn(x, y, cell_width)
return y_dev[0] - m_0_lin, m_1_ref - m_1_lin, val_1_ref - val_1_lin
def create_base_plot_a(ref_label):
fig = plt.figure(figsize=(16, 10.0))
grid = plt.GridSpec(2, 3, wspace=0.1, hspace=0.3)
ax1 = plt.subplot(grid[0, :])
ax2 = plt.subplot(grid[1, 0])
ax3 = plt.subplot(grid[1, 1])
ax4 = plt.subplot(grid[1, 2])
# upper plot
ax1.plot([], [], ls='-', c='C0', linewidth=lw, label=ref_label)
ax1.set_xlabel(r"$x/\delta_c$", fontsize=hm.fontsize_label)
ax1.tick_params(labelsize=hm.fontsize_tick)
ax1.set_xlim([0.0, 1.0])
ax1.set_ylim([0.0, 1.0])
ax1.legend(fontsize=hm.fontsize_legend, loc=1)
# lower plots
labels = [r"$(\mathrm{d}_x \tilde{c}_{ref} -\mathrm{d}_x \tilde{c}_{num})_{f_0}$",
r"$(\mathrm{d}_x \tilde{c}_{ref} -\mathrm{d}_x \tilde{c}_{num})_{f_1}$",
r"$(\tilde{c}_{ref} - \tilde{c}_{num})_{f_1}$"]
for ax, label in zip([ax2, ax3, ax4], labels):
ax.set_yticks([0.0])
ax.axhline(0.0, lw=2, color="k", ls=":")
ax.set_xlabel(r"$\delta_c / \Delta x$", fontsize=hm.fontsize_label)
ax.tick_params(labelsize=hm.fontsize_tick)
ax.set_xticks([1, 5, 10, 15])
ax.plot([], [], lw=lw, color="C3", ls=":", label=label)
ax.legend(fontsize=hm.fontsize_legend)
return fig, ax1, ax2, ax3, ax4
def create_base_plot_bp(ref_label):
fig = plt.figure(figsize=(16, 10.0))
grid = plt.GridSpec(2, 2, wspace=0.1, hspace=0.3)
ax1 = plt.subplot(grid[0, :])
ax2 = plt.subplot(grid[1, 0])
ax3 = plt.subplot(grid[1, 1])
# upper plot
ax1.plot([], [], ls='-', c='C0', linewidth=lw, label=ref_label)
ax1.set_xlabel(r"$x/\delta_c$", fontsize=hm.fontsize_label)
ax1.tick_params(labelsize=hm.fontsize_tick)
ax1.set_xlim([0.0, 1.0])
ax1.set_ylim([0.0, 1.0])
ax1.legend(fontsize=hm.fontsize_legend, loc=1)
# lower plots
labels = [r"$(\mathrm{d}_x \tilde{c}_{ref} -\mathrm{d}_x \tilde{c}_{num})_{f_1}$",
r"$(\tilde{c}_{ref} - \tilde{c}_{num})_{f_1}$"]
for ax, label in zip([ax2, ax3], labels):
ax.set_yticks([0.0])
ax.axhline(0.0, lw=2, color="k", ls=":")
ax.set_xlabel(r"$\delta_c / \Delta x$", fontsize=hm.fontsize_label)
ax.tick_params(labelsize=hm.fontsize_tick)
ax.set_xticks([1, 5, 10, 15])
ax.plot([], [], lw=lw, color="C3", ls=":", label=label)
ax.legend(fontsize=hm.fontsize_legend)
return fig, ax1, ax2, ax3
def text_formatter(number):
if number < 10:
return "{:2.3f}".format(number)
else:
return "{:2.2f}".format(number)
```
## Transfer species
```
fig, ax1, ax2, ax3, ax4 = create_base_plot_a(ref_label=r"$\tilde{c}_A$")
frames = 100
n_cells_array = 1.01 + np.power(np.linspace(0, 1, frames), 4) * (15.0 - 1.01)
n_cells_array = np.append(n_cells_array, n_cells_array[-2::-1])
bbox = dict(facecolor='white', alpha=1.0, boxstyle='round,pad=0.5')
dev_a = compute_derivative(x_plot, y_plot_a)
diff_m_0 = []
diff_m_1 = []
diff_v_1 = []
for nc in n_cells_array:
m_0, m_1, v_1 = compute_discretization_error(x_plot, y_plot_a, nc, 0)
diff_m_0.append(m_0)
diff_m_1.append(m_1)
diff_v_1.append(v_1)
dot_plot = [
ax2.scatter([], [], marker="o", color="C3", s=100),
ax3.scatter([], [], marker="o", color="C3", s=100),
ax4.scatter([], [], marker="o", color="C3", s=100)
]
# reference solution
ax1.plot(x_plot, y_plot_a, ls='-', c='C0', linewidth=lw)
# discretization error plot
ax2.plot(n_cells_array[:frames], diff_m_0[:frames], lw=lw, color="C3", ls=":")
ax3.plot(n_cells_array[:frames], diff_m_1[:frames], lw=lw, color="C3", ls=":")
ax4.plot(n_cells_array[:frames], diff_v_1[:frames], lw=lw, color="C3", ls=":")
# references to elements which will be updated/deleted during the animation
faces_ind = []
fills = []
approx_line, = ax1.plot([], [], c='C1', linewidth=lw, marker="o", ms=16)
tangent_0, = ax1.plot([], [], ls=":", c="C3", linewidth=lw)
tangent_1, = ax1.plot([], [], ls=":", c="C3", linewidth=lw)
n_cells_text = ax1.text(0.8, 0.5, r"x = 00.00", fontsize=hm.fontsize_label, bbox=bbox)
def update_a(frame):
n_cells = n_cells_array[frame]
cell_width = 1.0 / n_cells
# plot cells and cell averages
cell_av = compute_cell_average(n_cells, 10, 0)
for ind in faces_ind[::-1]:
del ax1.lines[ind]
del faces_ind[:]
for area in fills:
area.remove()
del fills[:]
start = len(ax1.lines)
for fi, av_i in enumerate(cell_av):
faces_ind.append(start)
start += 1
ax1.axvline(1.0/n_cells*fi, 0, 1, linestyle=':', color='k', lw=2)
fills.append(ax1.fill_between([1.0/n_cells*fi, 1.0/n_cells*(fi+1)], [0.0, 0.0], [av_i, av_i],
color="C0", alpha=alpha))
# linear approximation
approx_line.set_data([0.0, 0.5*cell_width, 1.5*cell_width], [1.0, cell_av[0], cell_av[1]])
# tangents at zeros and first cell boundary
tangent_0.set_data([0.0, 0.5 * cell_width], [1.0, 1.0 + 0.5 * cell_width * dev_a[0]])
m_1 = interpolate_2nn(x_plot, dev_a, cell_width)
val_1 = interpolate_2nn(x_plot, y_plot_a, cell_width)
tangent_1.set_data([0.5*cell_width, 1.5*cell_width], [val_1 - 0.5 * cell_width * m_1, val_1 + 0.5 * cell_width * m_1])
# display current resolution as text
number = r"$ \delta_c/\Delta_x = " + text_formatter(n_cells) + r"$"
n_cells_text.set_text(number)
# update discretization error
dot_plot[0]._offsets = np.c_[[n_cells], [interpolate_2nn(n_cells_array, diff_m_0, n_cells)]]
dot_plot[1]._offsets = np.c_[[n_cells], [interpolate_2nn(n_cells_array, diff_m_1, n_cells)]]
dot_plot[2]._offsets = np.c_[[n_cells], [interpolate_2nn(n_cells_array, diff_v_1, n_cells)]]
output_path = "../output/flux_a/"
!mkdir -p {output_path}
for frame in range(2*frames-1):
update_a(frame)
plt.savefig(output_path + "flux_a_{:03d}.png".format(frame), dpi=100, bbox_inches="tight")
```
## Bulk species
```
fig, ax1, ax2, ax3 = create_base_plot_bp(ref_label=r"$\tilde{c}_B$")
frames = 100
n_cells_array = 1.01 + np.power(np.linspace(0, 1, frames), 4) * (15.0 - 1.01)
n_cells_array = np.append(n_cells_array, n_cells_array[-2::-1])
bbox = dict(facecolor='white', alpha=1.0, boxstyle='round,pad=0.5')
dev_b = compute_derivative(x_plot, y_plot_b)
diff_m_0 = []
diff_m_1 = []
diff_v_1 = []
for nc in n_cells_array:
m_0, m_1, v_1 = compute_discretization_error(x_plot, y_plot_b, nc, 2)
diff_m_1.append(m_1)
diff_v_1.append(v_1)
dot_plot = [
ax2.scatter([], [], marker="o", color="C3", s=100),
ax3.scatter([], [], marker="o", color="C3", s=100),
]
# reference solution
ax1.plot(x_plot, y_plot_b, ls='-', c='C0', linewidth=lw)
# discretization error plot
ax2.plot(n_cells_array[:frames], diff_m_1[:frames], lw=lw, color="C3", ls=":")
ax3.plot(n_cells_array[:frames], diff_v_1[:frames], lw=lw, color="C3", ls=":")
# references to elements which will be updated/deleted during the animation
faces_ind = []
fills = []
approx_line, = ax1.plot([], [], c='C1', linewidth=lw, marker="o", ms=16)
tangent_1, = ax1.plot([], [], ls=":", c="C3", linewidth=lw)
n_cells_text = ax1.text(0.1, 0.5, r"x = 00.00", fontsize=hm.fontsize_label, bbox=bbox)
def update_b(frame):
n_cells = n_cells_array[frame]
cell_width = 1.0 / n_cells
# plot cells and cell averages
cell_av = compute_cell_average(n_cells, 10, 2)
for ind in faces_ind[::-1]:
del ax1.lines[ind]
del faces_ind[:]
for area in fills:
area.remove()
del fills[:]
start = len(ax1.lines)
for fi, av_i in enumerate(cell_av):
faces_ind.append(start)
start += 1
ax1.axvline(1.0/n_cells*fi, 0, 1, linestyle=':', color='k', lw=2)
fills.append(ax1.fill_between([1.0/n_cells*fi, 1.0/n_cells*(fi+1)], [0.0, 0.0], [av_i, av_i],
color="C0", alpha=alpha))
# linear approximation
approx_line.set_data([0.5*cell_width, 1.5*cell_width], [cell_av[0], cell_av[1]])
# tangents at first cell boundary
m_1 = interpolate_2nn(x_plot, dev_b, cell_width)
val_1 = interpolate_2nn(x_plot, y_plot_b, cell_width)
tangent_1.set_data([0.5*cell_width, 1.5*cell_width], [val_1 - 0.5 * cell_width * m_1, val_1 + 0.5 * cell_width * m_1])
# display current resolution as text
number = r"$ \delta_c/\Delta_x = " + text_formatter(n_cells) + r"$"
n_cells_text.set_text(number)
# update discretization error
dot_plot[0]._offsets = np.c_[[n_cells], [interpolate_2nn(n_cells_array, diff_m_1, n_cells)]]
dot_plot[1]._offsets = np.c_[[n_cells], [interpolate_2nn(n_cells_array, diff_v_1, n_cells)]]
output_path = "../output/flux_b/"
!mkdir -p {output_path}
for frame in range(2*frames-1):
update_b(frame)
plt.savefig(output_path + "flux_b_{:03d}.png".format(frame), dpi=100, bbox_inches="tight")
```
## Product species
```
fig, ax1, ax2, ax3 = create_base_plot_bp(ref_label=r"$\tilde{c}_P$")
frames = 100
n_cells_array = 1.01 + np.power(np.linspace(0, 1, frames), 4) * (15.0 - 1.01)
n_cells_array = np.append(n_cells_array, n_cells_array[-2::-1])
bbox = dict(facecolor='white', alpha=1.0, boxstyle='round,pad=0.5')
dev_p = compute_derivative(x_plot, y_plot_p)
diff_m_0 = []
diff_m_1 = []
diff_v_1 = []
for nc in n_cells_array:
m_0, m_1, v_1 = compute_discretization_error(x_plot, y_plot_p, nc, 4)
diff_m_1.append(m_1)
diff_v_1.append(v_1)
dot_plot = [
ax2.scatter([], [], marker="o", color="C3", s=100),
ax3.scatter([], [], marker="o", color="C3", s=100),
]
# reference solution
ax1.plot(x_plot, y_plot_p, ls='-', c='C0', linewidth=lw)
# discretization error plot
ax2.plot(n_cells_array[:frames], diff_m_1[:frames], lw=lw, color="C3", ls=":")
ax3.plot(n_cells_array[:frames], diff_v_1[:frames], lw=lw, color="C3", ls=":")
# references to elements which will be updated/deleted during the animation
faces_ind = []
fills = []
approx_line, = ax1.plot([], [], c='C1', linewidth=lw, marker="o", ms=16)
tangent_1, = ax1.plot([], [], ls=":", c="C3", linewidth=lw)
n_cells_text = ax1.text(0.1, 0.5, r"x = 00.00", fontsize=hm.fontsize_label, bbox=bbox)
def update_p(frame):
n_cells = n_cells_array[frame]
cell_width = 1.0 / n_cells
# plot cells and cell averages
cell_av = compute_cell_average(n_cells, 10, 4)
for ind in faces_ind[::-1]:
del ax1.lines[ind]
del faces_ind[:]
for area in fills:
area.remove()
del fills[:]
start = len(ax1.lines)
for fi, av_i in enumerate(cell_av):
faces_ind.append(start)
start += 1
ax1.axvline(1.0/n_cells*fi, 0, 1, linestyle=':', color='k', lw=2)
fills.append(ax1.fill_between([1.0/n_cells*fi, 1.0/n_cells*(fi+1)], [0.0, 0.0], [av_i, av_i],
color="C0", alpha=alpha))
# linear approximation
approx_line.set_data([0.5*cell_width, 1.5*cell_width], [cell_av[0], cell_av[1]])
# tangents at first cell boundary
m_1 = interpolate_2nn(x_plot, dev_p, cell_width)
val_1 = interpolate_2nn(x_plot, y_plot_p, cell_width)
tangent_1.set_data([0.5*cell_width, 1.5*cell_width], [val_1 - 0.5 * cell_width * m_1, val_1 + 0.5 * cell_width * m_1])
# display current resolution as text
number = r"$ \delta_c/\Delta_x = " + text_formatter(n_cells) + r"$"
n_cells_text.set_text(number)
# update discretization error
dot_plot[0]._offsets = np.c_[[n_cells], [interpolate_2nn(n_cells_array, diff_m_1, n_cells)]]
dot_plot[1]._offsets = np.c_[[n_cells], [interpolate_2nn(n_cells_array, diff_v_1, n_cells)]]
output_path = "../output/flux_p/"
!mkdir -p {output_path}
for frame in range(2*frames-1):
update_p(frame)
plt.savefig(output_path + "flux_p_{:03d}.png".format(frame), dpi=100, bbox_inches="tight")
```
```
import pandas as pd
import numpy as np
from xbbg import blp, pipeline, const
```
# Reference Data `BDP` and `BDS`
```
blp.__version__
blp.bdp('AAPL US Equity', flds=['Security_Name', 'Last_Price'])
blp.bdp('6758 JP Equity', flds='Crncy_Adj_Mkt_Cap', Eqy_Fund_Crncy='USD')
holders = blp.bds('AMZN US Equity', flds='All_Holders_Public_Filings', cache=True)
(
holders
.loc[:, ~holders.columns.str.contains(
f'holder_id|portfolio_name|change|number|'
f'metro|percent_of_portfolio|source'
)]
.rename(
index=lambda tkr: tkr.replace(' Equity', ''),
columns={
'holder_name_': 'holder',
'position_': 'position',
'filing_date__': 'filing_dt',
'percent_outstanding': 'pct_out',
'insider_status_': 'insider',
}
)
).head()
blp.dividend('SPY US Equity', start_date='2019')
blp.earning('FB US Equity', Eqy_Fund_Year=2018, Number_Of_Periods=2)
```
# Historical Data
Historical data with Excel-compatible overrides
```
blp.bdh(
tickers='SHCOMP Index', flds=['high', 'low', 'last_price'],
start_date='2019-11', end_date='2020', Per='W', Fill='P', Days='A',
)
```
Dividend / split adjustments
```
pd.concat([
blp.bdh(
'AAPL US Equity', 'Px_Last', '20140605', '20140610',
CshAdjNormal=True, CshAdjAbnormal=True, CapChg=True
).rename(columns={'Px_Last': 'Px_Adj'}),
blp.bdh(
'AAPL US Equity', 'Px_Last', '20140605', '20140610',
CshAdjNormal=False, CshAdjAbnormal=False, CapChg=False
).rename(columns={'Px_Last': 'Px_Raw'}),
], axis=1)
```
# Intraday Bars
```
cur_dt = pd.Timestamp('today', tz='America/New_York').date()
recent = pd.bdate_range(end=cur_dt, periods=2, tz='America/New_York')
pre_dt = max(filter(lambda dd: dd < cur_dt, recent))
pre_dt.date()
blp.bdib('QQQ US Equity', dt=pre_dt, session='day').tail()
blp.bdib('388 HK Equity', dt=pre_dt, session='am_open_7')
```
# Intraday Tick Data
```
blp.bdtick('QQQ US Equity', dt=pre_dt).tail(10)
```
# Equity Screen `BEQS`
```
blp.beqs('Core Capital Ratios', typ='GLOBAL').iloc[:5, :6]
```
# Subscription
`blp.live` yields market data snapshots as `dict` objects
```
async for snap in blp.live(['ESA Index', 'NQA Index'], info=const.LIVE_INFO, max_cnt=2):
print(snap)
```
# Pipelines
```
fx = blp.bdib('JPY Curncy', dt=pre_dt)
jp = pd.concat([
blp.bdib(ticker, dt=pre_dt, session='day')
for ticker in ['7974 JP Equity', '9984 JP Equity']
], axis=1)
jp.tail()
```
Get `close` prices and convert to USD
```
prices = (
jp
.pipe(pipeline.get_series, col='close')
.pipe(pipeline.apply_fx, fx=fx)
.tz_convert('Asia/Tokyo')
)
prices.tail()
```
## Customized Pipelines
VWAP for intraday bar data
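Using the bar close as the price proxy, the volume-weighted average price is
$$
\mathrm{VWAP} = \frac{\sum_i p_i\, v_i}{\sum_i v_i},
$$
where $p_i$ and $v_i$ are the close and volume of bar $i$. The helper below computes this per ticker and can optionally convert prices with the FX bars first.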
```
def vwap(data: pd.DataFrame, fx=None, name=None) -> pd.Series:
return pd.Series({
ticker: (
data[ticker][['close', 'volume']].prod(axis=1).sum()
if fx is None else (
data[ticker].close
.pipe(pipeline.apply_fx, fx)
.close
.mul(data[ticker].volume)
.sum()
)
) / data[ticker].volume.sum()
for ticker in data.columns.get_level_values(0).unique()
}, name=name)
```
VWAP in local currency
```
jp.pipe(vwap, name=jp.index[-1].date())
```
VWAP in USD
```
jp.pipe(vwap, fx=fx, name=jp.index[-1].date())
```
Total traded volume as of a given time of day, for each of the past few days
```
jp_hist = pd.concat([
pd.concat([
blp.bdib(ticker, dt=dt, session='day')
for ticker in ['7974 JP Equity', '9984 JP Equity']
], axis=1)
for dt in pd.bdate_range(end='today', periods=10)[:-1]
], sort=False)
np.unique(jp_hist.index.date)
def drop_zeros(data: pd.DataFrame) -> pd.DataFrame:
return (
data
.replace(0, np.nan)
.dropna(how='all')
.replace(np.nan, 0)
)
def traded_volume(data: pd.DataFrame, asof: str) -> pd.DataFrame:
return (
data
.pipe(pipeline.get_series, col='volume')
.between_time('0:00', asof)
.resample('B')
.sum()
.pipe(drop_zeros)
)
jp_hist.pipe(traded_volume, asof='10:00')
jp_hist.pipe(traded_volume, asof='11:00')
```
```
"""Simple tutorial using code from the TensorFlow example for Regression.
Parag K. Mital, Jan. 2016"""
# pip3 install --upgrade
# https://storage.googleapis.com/tensorflow/mac/tensorflow-0.6.0-py3-none-any.whl
# %%
%matplotlib inline
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
import numpy as np
import matplotlib.pyplot as plt
# %%
# get the classic mnist dataset
# one-hot means a sparse vector for every observation where only
# the class label is 1, and every other class is 0.
# more info here:
# https://www.tensorflow.org/versions/0.6.0/tutorials/mnist/download/index.html#dataset-object
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
# %%
# mnist is now a DataSet with accessors for:
# 'train', 'test', and 'validation'.
# within each, we can access:
# images, labels, and num_examples
print(mnist.train.num_examples,
mnist.test.num_examples,
mnist.validation.num_examples)
# %% the images are stored as:
# n_observations x n_features tensor (n-dim array)
# the labels are stored as n_observations x n_labels,
# where each observation is a one-hot vector.
print(mnist.train.images.shape, mnist.train.labels.shape)
# %% the range of the values of the images is from 0-1
print(np.min(mnist.train.images), np.max(mnist.train.images))
# %% we can visualize any one of the images by reshaping it to a 28x28 image
plt.imshow(np.reshape(mnist.train.images[100, :], (28, 28)), cmap='gray')
# %% We can create a container for an input image using tensorflow's graph:
# We allow the first dimension to be None, since this will eventually
# represent our mini-batches, or how many images we feed into a network
# at a time during training/validation/testing.
# The second dimension is the number of features that the image has.
n_input = 784
n_output = 10
net_input = tf.placeholder(tf.float32, [None, n_input])
# %% We can write a simple regression (y = W*x + b) as:
W = tf.Variable(tf.zeros([n_input, n_output]))
b = tf.Variable(tf.zeros([n_output]))
net_output = tf.nn.softmax(tf.matmul(net_input, W) + b)
# %% We'll create a placeholder for the true output of the network
y_true = tf.placeholder(tf.float32, [None, 10])
# %% And then write our loss function:
cross_entropy = -tf.reduce_sum(y_true * tf.log(net_output))
# %% This compares the predicted label (the argmax of the network output)
# with the actual label encoded in the one-hot vector
correct_prediction = tf.equal(
tf.argmax(net_output, 1), tf.argmax(y_true, 1))
# %% And now we can look at the mean of our network's correct guesses
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# %% We can tell the tensorflow graph to train w/ gradient descent using
# our loss function and an input learning rate
optimizer = tf.train.GradientDescentOptimizer(
0.01).minimize(cross_entropy)
# %% We now create a new session to actually perform the initialization of
# the variables:
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# %% Now actually do some training:
batch_size = 100
n_epochs = 10
for epoch_i in range(n_epochs):
for batch_i in range(mnist.train.num_examples // batch_size):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={
net_input: batch_xs,
y_true: batch_ys
})
print(sess.run(accuracy,
feed_dict={
net_input: mnist.validation.images,
y_true: mnist.validation.labels
}))
# %% Print final test accuracy:
print(sess.run(accuracy,
feed_dict={
net_input: mnist.test.images,
y_true: mnist.test.labels
}))
# %%
"""
# We could do the same thing w/ Keras like so:
from keras.models import Sequential
model = Sequential()
from keras.layers.core import Dense, Activation
model.add(Dense(output_dim=10, input_dim=784, init='zero'))
model.add(Activation("softmax"))
from keras.optimizers import SGD
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=learning_rate))
model.fit(mnist.train.images, mnist.train.labels, nb_epoch=n_epochs,
batch_size=batch_size, show_accuracy=True)
objective_score = model.evaluate(mnist.test.images, mnist.test.labels,
batch_size=100, show_accuracy=True)
"""
```
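For reference, the model above is softmax (multinomial logistic) regression: the prediction and the loss minimized by gradient descent are
$$
\hat{y} = \mathrm{softmax}(Wx + b), \qquad \mathcal{L} = -\sum_i y_i \log \hat{y}_i,
$$
i.e. a single affine layer with a softmax output, trained with the cross-entropy between the one-hot labels $y$ and the predictions $\hat{y}$.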
# Temporal-Difference Methods
In this notebook, you will write your own implementations of many Temporal-Difference (TD) methods.
While we have provided some starter code, you are welcome to erase these hints and write your code from scratch.
---
### Part 0: Explore CliffWalkingEnv
We begin by importing the necessary packages.
```
import sys
import gym
import numpy as np
import random
import math
from collections import defaultdict, deque
import matplotlib.pyplot as plt
%matplotlib inline
import check_test
from plot_utils import plot_values
```
Use the code cell below to create an instance of the [CliffWalking](https://github.com/openai/gym/blob/master/gym/envs/toy_text/cliffwalking.py) environment.
```
env = gym.make('CliffWalking-v0')
```
The agent moves through a $4\times 12$ gridworld, with states numbered as follows:
```
[[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],
[36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]]
```
At the start of any episode, state `36` is the initial state. State `47` is the only terminal state, and the cliff corresponds to states `37` through `46`.
The agent has 4 potential actions:
```
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
```
Thus, $\mathcal{S}^+=\{0, 1, \ldots, 47\}$, and $\mathcal{A} =\{0, 1, 2, 3\}$. Verify this by running the code cell below.
```
print(env.action_space)
print(env.observation_space)
```
In this mini-project, we will build towards finding the optimal policy for the CliffWalking environment. The optimal state-value function is visualized below. Please take the time now to make sure that you understand _why_ this is the optimal state-value function.
_**Note**: You can safely ignore the values of the cliff "states" as these are not true states from which the agent can make decisions. For the cliff "states", the state-value function is not well-defined._
```
# define the optimal state-value function
# (the optimal path takes 13 steps from the start state, each with reward -1)
V_opt = np.zeros((4,12))
V_opt[0, :] = -np.arange(3, 15)[::-1]       # top row: -14, ..., -3
V_opt[1, :] = -np.arange(3, 15)[::-1] + 1   # -13, ..., -2
V_opt[2, :] = -np.arange(3, 15)[::-1] + 2   # row above the cliff: -12, ..., -1
V_opt[3, 0] = -13                           # start state
plot_values(V_opt)
```
### Part 1: TD Control: Sarsa
In this section, you will write your own implementation of the Sarsa control algorithm.
Your algorithm has four arguments:
- `env`: This is an instance of an OpenAI Gym environment.
- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
- `alpha`: This is the step-size parameter for the update step.
- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
The algorithm returns as output:
- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
Please complete the function in the code cell below.
(_Feel free to define additional functions to help you to organize your code._)
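For reference, the Sarsa update implemented in `update_Q_sarsa` below is
$$
Q(S_t, A_t) \leftarrow Q(S_t, A_t) + \alpha \big( R_{t+1} + \gamma Q(S_{t+1}, A_{t+1}) - Q(S_t, A_t) \big),
$$
where $A_{t+1}$ is chosen by the same $\epsilon$-greedy policy that generated $A_t$ (on-policy), and $Q(S_{t+1}, A_{t+1})$ is taken to be zero when $S_{t+1}$ is terminal.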
```
def update_Q_sarsa(alpha, gamma, Q, state, action, reward, next_state=None, next_action=None):
"""Returns updated Q-value for the most recent experience."""
current = Q[state][action] # estimate in Q-table (for current state, action pair)
# get value of state, action pair at next time step
Qsa_next = Q[next_state][next_action] if next_state is not None else 0
target = reward + (gamma * Qsa_next) # construct TD target
new_value = current + (alpha * (target - current)) # get updated value
return new_value
def epsilon_greedy(Q, state, nA, eps):
"""Selects epsilon-greedy action for supplied state.
Params
======
Q (dictionary): action-value function
state (int): current state
    nA (int): number of actions in the environment
eps (float): epsilon
"""
    if random.random() > eps: # select the greedy action with probability 1 - epsilon
        return np.argmax(Q[state])
    else:                     # otherwise, select an action uniformly at random
        return random.choice(np.arange(nA))
def sarsa(env, num_episodes, alpha, gamma=1.0, plot_every=100):
nA = env.action_space.n
# initialize action-value function (empty dictionary of arrays)
Q = defaultdict(lambda: np.zeros(env.nA))
# monitor performance
tmp_scores = deque(maxlen=plot_every) # deque for keeping track of scores
avg_scores = deque(maxlen=num_episodes) # average scores over every plot_every episodes
# loop over episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 100 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
## AshD - complete the function
score = 0 # initialize score
state = env.reset() # start episode
eps = 1.0 / i_episode # set value of epsilon
action = epsilon_greedy(Q, state, nA, eps) # epsilon-greedy action selection
while True:
next_state, reward, done, info = env.step(action) # take action A, observe R, S'
score += reward # add reward to agent's score
if not done:
next_action = epsilon_greedy(Q, next_state, nA, eps) # epsilon-greedy action
Q[state][action] = update_Q_sarsa(alpha, gamma, Q, \
state, action, reward, next_state, next_action)
state = next_state # S <- S'
action = next_action # A <- A'
if done:
Q[state][action] = update_Q_sarsa(alpha, gamma, Q, \
state, action, reward)
tmp_scores.append(score) # append score
break
if (i_episode % plot_every == 0):
avg_scores.append(np.mean(tmp_scores))
# plot performance
plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores))
plt.xlabel('Episode Number')
plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
plt.show()
# print best 100-episode performance
print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(avg_scores))
return Q
```
Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function.
If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.
```
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsa = sarsa(env, 5000, .01)
# print the estimated optimal policy
policy_sarsa = np.array([np.argmax(Q_sarsa[key]) if key in Q_sarsa else -1 for key in np.arange(48)]).reshape(4,12)
check_test.run_check('td_control_check', policy_sarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsa)
# plot the estimated optimal state-value function
V_sarsa = ([np.max(Q_sarsa[key]) if key in Q_sarsa else 0 for key in np.arange(48)])
plot_values(V_sarsa)
```
### Part 2: TD Control: Q-learning
In this section, you will write your own implementation of the Q-learning control algorithm.
Your algorithm has four arguments:
- `env`: This is an instance of an OpenAI Gym environment.
- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
- `alpha`: This is the step-size parameter for the update step.
- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
The algorithm returns as output:
- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
Please complete the function in the code cell below.
(_Feel free to define additional functions to help you to organize your code._)
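For reference, the Q-learning (Sarsamax) update implemented in `update_Q_sarsamax` below is
$$
Q(S_t, A_t) \leftarrow Q(S_t, A_t) + \alpha \big( R_{t+1} + \gamma \max_{a} Q(S_{t+1}, a) - Q(S_t, A_t) \big),
$$
i.e. the TD target uses the greedy action in the next state, independent of the action the $\epsilon$-greedy policy actually takes (off-policy).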
```
def update_Q_sarsamax(alpha, gamma, Q, state, action, reward, next_state=None):
"""Returns updated Q-value for the most recent experience."""
current = Q[state][action] # estimate in Q-table (for current state, action pair)
# get value of state, action pair at next time step
Qsa_next = np.max(Q[next_state]) if next_state is not None else 0
target = reward + (gamma * Qsa_next) # construct TD target
new_value = current + (alpha * (target - current)) # get updated value
return new_value
def q_learning(env, num_episodes, alpha, gamma=1.0, plot_every=100):
nA = env.action_space.n
# initialize action-value function (empty dictionary of arrays)
Q = defaultdict(lambda: np.zeros(env.nA))
# monitor performance
tmp_scores = deque(maxlen=plot_every) # deque for keeping track of scores
avg_scores = deque(maxlen=num_episodes) # average scores over every plot_every episodes
# loop over episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 100 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
## AshD - complete the function
score = 0 # initialize score
state = env.reset() # start episode
eps = 1.0 / i_episode # set value of epsilon
while True:
action = epsilon_greedy(Q, state, nA, eps) # epsilon-greedy action selection
next_state, reward, done, info = env.step(action) # take action A, observe R, S'
score += reward # add reward to agent's score
Q[state][action] = update_Q_sarsamax(alpha, gamma, Q, \
state, action, reward, next_state)
state = next_state # S <- S'
if done:
tmp_scores.append(score) # append score
break
if (i_episode % plot_every == 0):
avg_scores.append(np.mean(tmp_scores))
# plot performance
plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores))
plt.xlabel('Episode Number')
plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
plt.show()
# print best 100-episode performance
print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(avg_scores))
return Q
```
Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function.
If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.
```
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsamax = q_learning(env, 5000, .01)
# print the estimated optimal policy
policy_sarsamax = np.array([np.argmax(Q_sarsamax[key]) if key in Q_sarsamax else -1 for key in np.arange(48)]).reshape((4,12))
check_test.run_check('td_control_check', policy_sarsamax)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsamax)
# plot the estimated optimal state-value function
plot_values([np.max(Q_sarsamax[key]) if key in Q_sarsamax else 0 for key in np.arange(48)])
```
### Part 3: TD Control: Expected Sarsa
In this section, you will write your own implementation of the Expected Sarsa control algorithm.
Your algorithm has four arguments:
- `env`: This is an instance of an OpenAI Gym environment.
- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
- `alpha`: This is the step-size parameter for the update step.
- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
The algorithm returns as output:
- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
Please complete the function in the code cell below.
(_Feel free to define additional functions to help you to organize your code._)
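For reference, the Expected Sarsa update implemented in `update_Q_expsarsa` below is
$$
Q(S_t, A_t) \leftarrow Q(S_t, A_t) + \alpha \Big( R_{t+1} + \gamma \sum_{a} \pi(a \mid S_{t+1})\, Q(S_{t+1}, a) - Q(S_t, A_t) \Big),
$$
where $\pi$ is the $\epsilon$-greedy policy returned by `get_probs`; the expectation over next actions replaces the single sampled action used by Sarsa.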
```
def update_Q_expsarsa(alpha, gamma, Q, state, action, reward, policy_s, next_state=None):
"""Returns updated Q-value for the most recent experience."""
current = Q[state][action] # estimate in Q-table (for current state, action pair)
# get value of state, action pair at next time step
    Qsa_next = np.dot(Q[next_state], policy_s)
target = reward + (gamma * Qsa_next) # construct TD target
new_value = current + (alpha * (target - current)) # get updated value
return new_value
def get_probs(Q_s, epsilon, nA):
""" obtains the action probabilities corresponding to epsilon-greedy policy """
policy_s = np.ones(nA) * epsilon / nA
best_a = np.argmax(Q_s)
policy_s[best_a] = 1 - epsilon + epsilon / nA
return policy_s
def expected_sarsa(env, num_episodes, alpha, gamma=1.0, plot_every=100):
nA = env.action_space.n
# initialize action-value function (empty dictionary of arrays)
Q = defaultdict(lambda: np.zeros(env.nA))
# monitor performance
tmp_scores = deque(maxlen=plot_every) # deque for keeping track of scores
avg_scores = deque(maxlen=num_episodes) # average scores over every plot_every episodes
# loop over episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 100 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
## AshD - complete the function
score = 0 # initialize score
state = env.reset() # start episode
        eps = 0.005  # fixed small epsilon (a 1.0 / i_episode schedule also works)
while True:
action = epsilon_greedy(Q, state, nA, eps) # epsilon-greedy action selection
next_state, reward, done, info = env.step(action) # take action A, observe R, S'
score += reward # add reward to agent's score
# design policy rule
policy_s = get_probs(Q[next_state], eps, nA)
# update Q value
Q[state][action] = update_Q_expsarsa(alpha, gamma, Q, \
state, action, reward, policy_s, next_state)
state = next_state # S <- S'
if done:
tmp_scores.append(score) # append score
break
if (i_episode % plot_every == 0):
avg_scores.append(np.mean(tmp_scores))
# plot performance
plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores))
plt.xlabel('Episode Number')
plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
plt.show()
# print best 100-episode performance
print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(avg_scores))
return Q
```
Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function.
If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.
```
# obtain the estimated optimal policy and corresponding action-value function
Q_expsarsa = expected_sarsa(env, 5000, 1)
# print the estimated optimal policy
policy_expsarsa = np.array([np.argmax(Q_expsarsa[key]) if key in Q_expsarsa else -1 for key in np.arange(48)]).reshape(4,12)
check_test.run_check('td_control_check', policy_expsarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_expsarsa)
# plot the estimated optimal state-value function
plot_values([np.max(Q_expsarsa[key]) if key in Q_expsarsa else 0 for key in np.arange(48)])
```
# Homework 4
### Due Date: Tuesday, September 26th at 11:59 PM
## Problem 1
The file `circles.txt` contains measurements of circle radii. Your task is to write a script that reports the average area of the circles. You will **not** use the `numpy` `mean` function. Here are the requirements:
1. Open `circles.txt`, read in the data, and convert the data to floats.
2. Write a function that computes the area of a circle.
3. Write a function, called `myave`, that computes the average of the areas of the circles. At the very least, `myave` should accept the list of radii as one argument and the circle function that you wrote in step 2 as another argument. There are other ways of doing this task, but I want you to do it this way.
4. Print out the result.
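One possible shape for a solution, shown only as a sketch (the one-radius-per-line format of `circles.txt` is an assumption, and the function names are illustrative):
```python
import math

def circle_area(radius):
    """Area of a circle with the given radius."""
    return math.pi * radius ** 2

def myave(radii, area_func):
    """Average of area_func applied to each radius, without numpy.mean."""
    total = 0.0
    for r in radii:
        total += area_func(r)
    return total / len(radii)

# Assumes circles.txt contains one radius per line
with open('circles.txt') as f:
    radii = [float(line) for line in f if line.strip()]

print(myave(radii, circle_area))
```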
## Problem 2
The goal of this problem is to write a simple bank account withdraw system. The problem is based off of one in _Structure and Interpretation of Computer Programs_.
**Instructions:** Do each part in a different cell block and clearly label each part.
### Part 1
Write a closure to make withdrawals from a bank account. The outer function should accept the initial balance as an argument (I'll refer to this argument as `balance` in this problem statement, but you can call it whatever you want). The inner function should accept the withdraw amount as an argument and return the new balance.
**NOTE1:** For this part, do not try to reassign `balance` in the inner function. Just see what happens when you return a new balance. You can store the new balance in a new name (call it `new_bal` if you want) or just return the new balance directly.
**NOTE2:** You may want to check for basic exceptions (e.g. attempting to withdraw more than the current balance).
Once you write your functions, demo them in your notebook as follows:
```python
wd = make_withdraw(init_balance)
wd(withdraw_amount)
wd(new_withdraw_amount)
```
You should observe that this does not behave correctly. Why not?
### Part 2
You might try to fix things up by reassigning `balance` within the inner function, but that won't work either. Try it out and explain why it doesn't work. Try to use the language that we used in lecture. **Hint:** [Python Execution Model](https://docs.python.org/3/reference/executionmodel.html).
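For concreteness, the Part 2 attempt typically looks like the sketch below; the failure is standard Python scoping behavior (the assignment makes `balance` local to the inner function):
```python
def make_withdraw(balance):
    def withdraw(amount):
        balance = balance - amount  # the assignment makes 'balance' local, so the right-hand side is unbound
        return balance
    return withdraw

wd = make_withdraw(100)
wd(30)  # UnboundLocalError: local variable 'balance' referenced before assignment
```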
### Part 3
Now, make just one small change to your code from Part 2. Declare `balance` as a nonlocal variable using the nonlocal keyword. That is, make the first line of the inner function, say `nonlocal balance`. Here's some information on the `nonlocal` statement: [`nonlocal`](https://docs.python.org/3/reference/simple_stmts.html#nonlocal).
Now test things out like you did in Part 1. It should be behaving correctly now.
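A minimal sketch of the Part 3 version, assuming the interface described above (the names and the exception are illustrative):
```python
def make_withdraw(balance):
    def withdraw(amount):
        nonlocal balance  # rebind the name from the enclosing scope instead of creating a local
        if amount > balance:
            raise ValueError("Insufficient funds")
        balance -= amount
        return balance
    return withdraw

wd = make_withdraw(100)
print(wd(30))  # 70
print(wd(30))  # 40 -- the balance now persists between calls
```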
### Part 4
Finally, visualize your code with [Python Tutor](http://pythontutor.com/) and embed your visualization in your notebook. Pay attention to the variable `balance`.
## Problem 3
Let's return to the data from Problem 1. Write two functions: 1.) The first function should return the average circle area (you can re-use the one you already wrote if you'd like, but you might need to update it slightly for this problem) and 2.) The second function should just use `numpy` to compute the average.
Write a decorator to time the evaluation of each function. You can use the timing decorator from lecture.
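If you don't have the lecture version handy, a generic timing decorator looks roughly like this (a sketch, not necessarily identical to the one from lecture):
```python
import time
from functools import wraps

def timer(func):
    """Print how long a single call to func takes."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        print('{} took {:.6f} s'.format(func.__name__, time.perf_counter() - start))
        return result
    return wrapper
```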
#### Notes and Hints
1. Be fair!
2. To be as fair as possible, do the following:
1. Create an areas list/array _outside_ of your averaging functions. This means that you should loop over the radii you read in from `circles.txt`, compute the area for each radius, and store that area in an array. Do you know why this is more fair? Also, try not to use `append`. Instead, preallocate space for your `area` list/array.
2. Your `my_ave` function should accept your areas data as a list. Remember, to allocate a list you should do `[0.0]*N`: if you use such a construct, your list will be filled with zeros.
3. Your `np_ave` function should accept your areas data as a `numpy` array. To allocate a `numpy` array do `areas_np = np.zeros(len(radii))`.
4. Now both functions are using the best data types possible for their tasks.
## Problem 4
Write a decorator to check if a quantity returned from a function is positive. An exception should be raised if the quantity is not positive.
Write three functions and decorate them with your decorator:
1. A function that returns the discriminant $\displaystyle d = b^{2} - 4ac$
2. A function that computes the absolute value (you must write this yourself...do not use built-ins)
3. A function of your own choosing.
Show that your decorator behaves correctly. That is, for each function, show two cases (where applicable):
1. A case where positivity is not violated
2. A case where positivity is violated
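One possible shape for the decorator and the first function (a sketch only; the exception type and names are your choice):
```python
from functools import wraps

def check_positive(func):
    """Raise if the wrapped function returns a non-positive value."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if result <= 0:
            raise ValueError('{} returned a non-positive value: {}'.format(func.__name__, result))
        return result
    return wrapper

@check_positive
def discriminant(a, b, c):
    return b**2 - 4*a*c

discriminant(1, 5, 2)    # 17: positivity holds
# discriminant(1, 1, 5)  # -19: raises ValueError
```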
```
library(qvalue)
library(dplyr)
library(ggplot2)
library(cowplot)
# common genes, i.e. genes tested for all aggregations
df_common_genes = read.csv("/hps/nobackup/stegle/users/acuomo/all_scripts/sc_eqtl/common_genes_across_all_aggregation_methods_and_bulk.csv")
common_genes = as.character(df_common_genes$gene)
length(common_genes)
# highly variable genes (top 50% CV)
Giordanos_selection = "/hps/nobackup/stegle/users/galvari/data/iPSCs/singleCell/metadata/ensembl_gene/Ensembl_75_Gene_CV_quant5.txt"
G_file = read.csv(Giordanos_selection, sep = "\t")
nrow(G_file)
head(G_file,2)
#### load bulk results
##### a-bulk (all results)
bulk_folder = "/hps/nobackup/hipsci/scratch/ComparingQtlMapping/Bulk/BulkTotal_PCA20/"
res_all = read.csv(paste0(bulk_folder,"qtl_results_all.txt"), sep = "\t")
# a-bulk (lead SNP only, to get significance threshold at FDR<10%)
leads_all = read.csv(paste0(bulk_folder,"top_qtl_results_all.txt"), sep = "\t")
leads_all = leads_all[leads_all$feature_id %in% common_genes,]
leads_all$q_value = qvalue(leads_all$empirical_feature_p_value)$qvalues
max_qval = max(leads_all[leads_all$q_value < 0.1,'q_value'])
emp_pv_star_all = max(leads_all[leads_all$q_value==max_qval,"empirical_feature_p_value"])
emp_pv_star_all
##### m-bulk (all results)
bulk_matched_folder = "/hps/nobackup/hipsci/scratch/ComparingQtlMapping/Bulk/BulkDay0Healthy_PCA20_88_206/"
res = read.csv(paste0(bulk_matched_folder,"qtl_results_all.txt"), sep = "\t")
# m-bulk (lead SNP only, to get significance threshold at FDR<10%)
leads = read.csv(paste0(bulk_matched_folder,"top_qtl_results_all.txt"), sep = "\t")
leads = leads[leads$feature_id %in% common_genes,]
leads$q_value = qvalue(leads$empirical_feature_p_value)$qvalues
max_qval = max(leads[leads$q_value < 0.1,'q_value'])
emp_pv_star_matched = max(leads[leads$q_value==max_qval,"empirical_feature_p_value"])
emp_pv_star_matched
### dr-mean
dir0 = "/hps/nobackup/hipsci/scratch/ComparingQtlMapping/SingleCell/PseudoBulk/mean/Run_Output_PCA20_88_log_TPM_scater_libsize_206/"
df0 = read.csv(paste0(dir0,"top_qtl_results_all.txt"), sep="\t")
df0 = df0[df0$feature_id %in% common_genes,]
head(df0,2)
# first consider eQTL tested in all sets
eqtls_dr = unique(paste0(df0$feature_id,"-",df0$snp_id))
eqtls_m = unique(paste0(res$feature_id,"-",res$snp_id))
eqtls_a = unique(paste0(res_all$feature_id,"-",res_all$snp_id))
eqtls0 = eqtls_dr[eqtls_dr %in% eqtls_m]
common_eqtls = eqtls0[eqtls0 %in% eqtls_a]
length(common_eqtls)
# then assess replication in either bulk sets
df0$q_value = qvalue(df0$empirical_feature_p_value)$qvalues
df1 = df0[df0$q_value < 0.05,]
df1 = df1[-which(duplicated(df1$feature_id)),]
sc_eqtls = unique(paste0(df1$feature_id,"-",df1$snp_id))
sc_eqtls = sc_eqtls[sc_eqtls %in% common_eqtls]
length(sc_eqtls)
# single-cell eQTL (dr-mean) replicated in m-bulk
df2 = inner_join(df1, res, by = c("snp_id","feature_id"), suffix = c(".sc",".bulk"))
df3 = df2[(df2$empirical_feature_p_value.bulk < emp_pv_star_matched &
(df2$beta.sc*df2$beta.bulk)>0),]
sc_eqtls_m = unique(paste0(df3$feature_id,"-",df3$snp_id))
length(sc_eqtls_m)
# single-cell eQTL (dr-mean) replicated in a-bulk
df4 = inner_join(df1, res_all, by = c("snp_id","feature_id"), suffix = c(".sc",".bulk"))
df5 = df4[(df4$empirical_feature_p_value.bulk < emp_pv_star_all &
(df4$beta.sc*df4$beta.bulk)>0),]
sc_eqtls_a = unique(paste0(df5$feature_id,"-",df5$snp_id))
length(sc_eqtls_a)
# next, define three categories:
# 1. single-cell eQTL replicated in both m-bulk and a-bulk
# 2. single-cell eQTL replicated in a-bulk but NOT m-bulk
# 3. single-cell eQTL not replicated in either
eqtls1 = sc_eqtls_m[sc_eqtls_m %in% sc_eqtls_a]
eqtls2 = sc_eqtls_a[!(sc_eqtls_a %in% sc_eqtls_m)]
eqtls3 = sc_eqtls[!(sc_eqtls %in% eqtls1)]
length(eqtls1)
length(eqtls2)
length(eqtls3)
df1$eqtl = paste0(df1$feature_id,"-",df1$snp_id)
df_to_plot = df1[df1$eqtl %in% sc_eqtls,c("feature_id","snp_id","eqtl","beta")]
nrow(df_to_plot)
head(df_to_plot,2)
# get mean, variance (and CV2) for the genes
# consider effect size in the a) single-cell run, b) bulk run (probably a-bulk)
G_file$feature_id = G_file$gene
df_to_plot2 = inner_join(df_to_plot, G_file, by = "feature_id")
nrow(df_to_plot2)
head(df_to_plot2,2)
df_to_plot2$m_bulk = df_to_plot2$eqtl %in% sc_eqtls_m
df_to_plot2$a_bulk = df_to_plot2$eqtl %in% sc_eqtls_a
df_to_plot2$abs_sc_beta = abs(df_to_plot2$beta)
head(df_to_plot2,2)
df_to_plot2[df_to_plot2$m_bulk & df_to_plot2$a_bulk,"category"] = "both"
df_to_plot2[df_to_plot2$m_bulk & !df_to_plot2$a_bulk,"category"] = "only_m_bulk"
df_to_plot2[!df_to_plot2$m_bulk & df_to_plot2$a_bulk,"category"] = "only_a_bulk"
df_to_plot2[!df_to_plot2$m_bulk & !df_to_plot2$a_bulk,"category"] = "neither"
tail(df_to_plot2)
df_to_plot2 %>% group_by(category) %>% summarise(eGenes = n())
df_to_plot2$category = factor(df_to_plot2$category, levels = c("both", "only_m_bulk","only_a_bulk","neither"))
df_to_plot2 = df_to_plot2[df_to_plot2$category %in% c("both", "only_a_bulk","neither"),]
options(repr.plot.width = 6, repr.plot.height = 8)
p1 = df_to_plot2 %>% group_by(category) %>% summarise(eGenes = n()) %>%
ggplot(aes(x = category, y = eGenes, fill = category)) + geom_bar(stat="identity") + theme_classic()
p2 = ggplot(df_to_plot2, aes(x = category, y = log2(mean+1), fill= category)) + geom_violin() + geom_boxplot(width=0.2) + theme_classic()
p3 = ggplot(df_to_plot2, aes(x = category, y = log2(std**2+1),fill= category)) + geom_violin() + geom_boxplot(width=0.2) + theme_classic()
p4 = ggplot(df_to_plot2, aes(x = category, y = abs_sc_beta, fill= category)) + geom_violin() + geom_boxplot(width=0.2) + theme_classic()
# plot_grid(p1,p2,p3,p4,ncol=1)
### bulk stats
mb_stats_filename = "/hps/nobackup/stegle/users/acuomo/all_scripts/sc_eqtl/stats_HipSci_passQc_87.txt"
ab_stats_filename = "/hps/nobackup/stegle/users/acuomo/all_scripts/sc_eqtl/stats_HipSci_passQc.txt"
mb_stats = read.csv(mb_stats_filename, sep = "\t", row.names = 1)
head(mb_stats,2)
ab_stats = read.csv(ab_stats_filename, sep = "\t", row.names = 1)
head(ab_stats,2)
mb_stats$feature_id = rownames(mb_stats)
ab_stats$feature_id = rownames(ab_stats)
df_to_plot3 = inner_join(df_to_plot2, mb_stats)
head(df_to_plot3,2)
# hist(df_to_plot3$FractionOfZero)
# hist(log2(df_to_plot3$MeanTPM_All+1))
mean(df_to_plot3[df_to_plot3$category == "both","FractionOfZero"])
mean(df_to_plot3[df_to_plot3$category == "neither","FractionOfZero"])
mean(df_to_plot3[df_to_plot3$category == "only_a_bulk","FractionOfZero"])
options(repr.plot.width = 6, repr.plot.height = 8)
p5 = ggplot(df_to_plot3, aes(x = category, y = FractionOfZero, fill= category)) + geom_violin() + theme_classic() #+ ylim(c(0,0.025)) #+ geom_boxplot(width=0.2)
p6 = ggplot(df_to_plot3, aes(x = category, y = log2(MeanTPM_nonZero+1), fill= category)) + geom_violin() + geom_boxplot(width=0.2) + theme_classic()
p7 = ggplot(df_to_plot3, aes(x = category, y = log2(VarTPM_nonZero+1), fill= category)) + geom_violin() + geom_boxplot(width=0.2) + theme_classic()
# plot_grid(p1,p2,p3,p4,ncol=1)
# plot_grid(p1,p5,p6,p7,ncol=1)
# p6
nrow(df_to_plot3)
df_to_plot4 = inner_join(df_to_plot3, res, by = c("snp_id","feature_id"), suffix = c("sc","bulk"))
nrow(df_to_plot4)
p8 = ggplot(df_to_plot4, aes(x = category, y = abs(betabulk), fill= category)) + geom_violin() + geom_boxplot(width=0.2) + theme_classic()
# plot_grid(p1,p6,p7,p8,ncol=1)
df_to_plot5 = inner_join(df_to_plot2, ab_stats)
df_to_plot6 = inner_join(df_to_plot5, res, by = c("snp_id","feature_id"), suffix = c("sc","bulk"))
p9 = ggplot(df_to_plot5, aes(x = category, y = FractionOfZero, fill= category)) + geom_violin() + theme_classic() #+ ylim(c(0,0.025)) #+ geom_boxplot(width=0.2)
p10 = ggplot(df_to_plot5, aes(x = category, y = log2(MeanTPM_nonZero+1), fill= category)) + geom_violin() + geom_boxplot(width=0.2) + theme_classic()
p11 = ggplot(df_to_plot5, aes(x = category, y = log2(VarTPM_nonZero+1), fill= category)) + geom_violin() + geom_boxplot(width=0.2) + theme_classic()
p12 = ggplot(df_to_plot6, aes(x = category, y = abs(betabulk), fill= category)) + geom_violin() + geom_boxplot(width=0.2) + theme_classic()
# plot_grid(p1,p9,p10,p11,p12,ncol=1)
type.col <- c(both = "#336b87", only_a_bulk = "#f98866", only_m_bulk = "grey", neither="#66a5ad")
options(repr.plot.width = 7, repr.plot.height = 12)
plot_grid(p1+scale_fill_manual(values = type.col),
p2+scale_fill_manual(values = type.col),
p3+scale_fill_manual(values = type.col),
p4+scale_fill_manual(values = type.col),
p10+scale_fill_manual(values = type.col),
p11+scale_fill_manual(values = type.col),
p12+scale_fill_manual(values = type.col), ncol=1)
fig_dir = "/hps/nobackup/stegle/users/acuomo/all_scripts/sc_eqtl/figures/"
pdf(paste0(fig_dir,"eqtl_properties_sc_bulk.pdf"), width=7, height=12)
plot_grid(p1+scale_fill_manual(values = type.col),
p2+scale_fill_manual(values = type.col),
p3+scale_fill_manual(values = type.col),
p4+scale_fill_manual(values = type.col),
p10+scale_fill_manual(values = type.col),
p11+scale_fill_manual(values = type.col),
p12+scale_fill_manual(values = type.col), ncol=1)
dev.off()
```
```
DATASET = 'no_dataset'
VERSION = 0
BATCH_SIZE = 32
EPOCHS = 10
# Parameters
DATASET = "human_enhancers_ensembl"
print(DATASET, VERSION, BATCH_SIZE, EPOCHS)
```
## Config
```
import os
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchtext.data.utils import get_tokenizer
from genomic_benchmarks.dataset_getters.pytorch_datasets import get_dataset
from genomic_benchmarks.models.torch_cnn import CNN
from genomic_benchmarks.dataset_getters.utils import coll_factory, LetterTokenizer, build_vocab, check_seq_lengths, check_config, VARIABLE_LENGTH_DATASETS
from genomic_benchmarks.data_check import list_datasets, info
USE_PADDING = DATASET in VARIABLE_LENGTH_DATASETS
config = {
"dataset": DATASET,
"dataset_version": VERSION,
"epochs": EPOCHS,
"batch_size": BATCH_SIZE,
"use_padding": USE_PADDING,
"force_download": False,
"run_on_gpu": True,
"number_of_classes": 2,
"embedding_dim": 100,
}
check_config(config)
```
## Choose the dataset
```
train_dset = get_dataset(config["dataset"], 'train')
```
## Tokenizer and vocab
```
tokenizer = get_tokenizer(LetterTokenizer())
vocabulary = build_vocab(train_dset, tokenizer, use_padding=config["use_padding"])
print("vocab len:" ,vocabulary.__len__())
print(vocabulary.get_stoi())
```
## Dataloader and batch preparation
```
# Run on GPU or CPU
device = 'cuda' if config["run_on_gpu"] and torch.cuda.is_available() else 'cpu'
print('Using {} device'.format(device))
max_seq_len, nn_input_len = check_seq_lengths(dataset=train_dset, config=config)
# Data Loader
if(config["use_padding"]):
collate = coll_factory(vocabulary, tokenizer, device, pad_to_length = nn_input_len)
else:
collate = coll_factory(vocabulary, tokenizer, device, pad_to_length = None)
train_loader = DataLoader(train_dset, batch_size=config["batch_size"], shuffle=True, collate_fn=collate)
```
## Model
```
model = CNN(
number_of_classes=config["number_of_classes"],
vocab_size=vocabulary.__len__(),
embedding_dim=config["embedding_dim"],
input_len=nn_input_len
).to(device)
```
## Training
```
model.train(train_loader, epochs=config["epochs"])
```
## Testing
```
# test_dset = get_dataset_fn('test', force_download=config["force_download"], version=config["dataset_version"])
test_dset = get_dataset(config["dataset"], 'test')
test_loader = DataLoader(test_dset, batch_size=config["batch_size"], shuffle=True, collate_fn=collate)
model.test(test_loader)
```
# Usage guide for the maysics.stats module
The stats module contains two classes
|Name|Purpose|
|---|---|
|DF1d|one-dimensional distribution fitting|
|DFT|goodness-of-fit test for a single distribution|
The stats module contains seven functions
|Name|Purpose|
|---|---|
|r_moment|raw moment|
|ex|mathematical expectation|
|c_moment|central moment|
|dx|variance|
|skew|skewness|
|kurt|kurtosis|
|mle|maximum likelihood estimation|
<br></br>
## One-dimensional distribution fitting: DF1d
Fits a probability density function from the frequency distribution of the sample points
<br>The explicit functional form is obtained by interpolation
DF1d(sample, span, kind='linear')
<br>```sample``` is the set of sample points
<br>```span``` gives the interval boundaries; e.g. span = [a, b, c] splits the range into the intervals [a, b] and [b, c], and the frequency in each interval is counted
<br>```kind```: the interpolation type, given either as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'previous', 'next', where 'zero', 'slinear', 'quadratic' and 'cubic' refer to spline interpolation of zeroth, first, second or third order, and 'previous' and 'next' simply return the previous or next value of the point) or as an integer specifying the order of the spline interpolator to use.
<br>The DF1d class has two methods: ```show``` and ```savefig```
<br>They display and save the plot of the probability density function, respectively
### DEMO 1-1: Fit the probability density function $f(x)$ and output $f(0)$
```
from maysics.stats import DF1d
import numpy as np
np.random.seed(100)
data = np.random.normal(0, 1, 10000)
# From -1.5 to 1.5 in steps of 0.1; count the frequency in each interval
span = np.arange(-1.5, 1.6, 0.1)
df1d = DF1d(data, span)
df1d.f(0)
```
### DEMO 1-2: Fit the probability density function $f(x)$ and plot it
```
from maysics.stats import DF1d
import numpy as np
np.random.seed(100)
data = np.random.normal(0, 1, 10000)
# From -1.5 to 1.5 in steps of 0.1; count the frequency in each interval
span = np.arange(-1.5, 1.6, 0.1)
df1d = DF1d(data, span)
df1d.show()
```
<br></br>
## Goodness-of-fit test for a single distribution: DFT
Tests whether a hypothesized distribution is consistent with the empirical distribution of the data
DFT(func_type='pdf')
<br>```func_type``` is the type of function being tested; the options are pdf (probability density function), cdf (cumulative distribution function) and dis (discrete distribution)
<br>The DFT class has three methods: ```fit```, ```show``` and ```savefig```
<br>fit performs the computation; show and savefig display and save the plot, respectively
<br>The DFT class has three attributes: ```degree```, ```chi2_value``` and ```P```
<br>degree is the number of degrees of freedom of the chi-square distribution
<br>chi2_value is the chi-square statistic
<br>P is the error probability of rejecting the hypothesis
### The fit method:
fit(data, func, args={}, acc=0.1)
<br>```data``` is the data set to be tested
<br>```func``` is the probability function to be tested
<br>```args``` passes additional parameters to func
<br>```acc``` is only used for ```func_type='pdf'``` and ```func_type='cdf'``` and sets the integration accuracy
### The show and savefig methods:
Both methods draw two plots:
<br>1. A violin plot of the raw data, showing the distribution, the quartiles and the mean point
<br>2. The graph of func
<br>Note that the vertical axis of the plots is rescaled, so its numerical values have no direct meaning
<br>Both methods have one default parameter: ```acc=0.01```
<br>It only takes effect when func_type is 'pdf' or 'cdf' and sets the plotting resolution
### DEMO 2-1: Test the probability density function of the standard normal distribution and plot it
```
from maysics.stats import DFT
import numpy as np
from scipy.stats import norm
np.random.seed(100)
data = np.random.normal(0, 1, (100,))
# fit the distribution
dft = DFT()
dft.fit(data, norm.pdf, args={'loc':0, 'scale':1})
print('Degrees of freedom of the chi-square distribution:', dft.degree)
print('Chi-square value:', dft.chi2_value)
print('Error probability of rejecting the hypothesis:', dft.P)
# plot
dft.show()
```
### DEMO 2-2: Test the cumulative distribution function of the standard normal distribution and plot it
```
from maysics.stats import DFT
import numpy as np
from scipy.stats import norm
np.random.seed(100)
data = np.random.normal(0, 1, (100,))
# fit the distribution
dft = DFT('cdf')
dft.fit(data, norm.cdf, args={'loc':0, 'scale':1})
print('Degrees of freedom of the chi-square distribution:', dft.degree)
print('Chi-square value:', dft.chi2_value)
print('Error probability of rejecting the hypothesis:', dft.P)
# plot
dft.show()
```
### DEMO 2-3: Test a Poisson distribution and plot it
```
from maysics.stats import DFT
import numpy as np
from scipy.special import factorial
from scipy.stats import poisson
np.random.seed(100)
data = np.random.poisson(lam=2, size=100)
# fit the distribution
dft = DFT('pmf')
dft.fit(data, poisson.pmf, args={'mu':2})
print('Degrees of freedom of the chi-square distribution:', dft.degree)
print('Chi-square value:', dft.chi2_value)
print('Error probability of rejecting the hypothesis:', dft.P)
# plot
dft.show()
```
<br></br>
## Numerical characteristics: r_moment, ex, c_moment, dx, skew, kurt
ex is equivalent to the first raw moment
<br>dx is equivalent to the second central moment
<br>skew is equivalent to the third central moment
<br>kurt is equivalent to the fourth central moment
### DEMO 3-1: Numerical characteristics of a data set uniformly distributed on (0, 1)
```
import maysics as ms
import numpy as np
np.random.seed(100)
data = np.random.rand(100)
print('First raw moment:', ms.stats.r_moment(data))
print('Expectation:', ms.stats.ex(data))
print('First central moment:', ms.stats.c_moment(data))
print('Second central moment:', ms.stats.c_moment(data, k=2))
print('Variance:', ms.stats.dx(data))
print('Third central moment:', ms.stats.c_moment(data, k=3))
print('Skewness:', ms.stats.skew(data))
print('Fourth central moment:', ms.stats.c_moment(data, k=4))
print('Kurtosis:', ms.stats.kurt(data))
```
### DEMO 3-2: Numerical characteristics of the probability density function of the standard normal distribution
```
import maysics as ms
import numpy as np
from scipy.stats import norm
p_range = (-3, 3)
args = {'loc':0, 'scale':1}
print('First raw moment:', ms.stats.r_moment(norm.pdf, p_range, args))
print('Expectation:', ms.stats.ex(norm.pdf, p_range, args))
print('First central moment:', ms.stats.c_moment(norm.pdf, p_range, args))
print('Second central moment:', ms.stats.c_moment(norm.pdf, p_range, args, 2))
print('Variance:', ms.stats.dx(norm.pdf, p_range, args))
print('Third central moment:', ms.stats.c_moment(norm.pdf, p_range, args, 3))
print('Skewness:', ms.stats.skew(norm.pdf, p_range, args))
print('Fourth central moment:', ms.stats.c_moment(norm.pdf, p_range, args, 4))
print('Kurtosis:', ms.stats.kurt(norm.pdf, p_range, args))
```
<br></br>
## Maximum likelihood estimation: mle
Used to fit the parameters of a probability density function
<br>Construct the log-likelihood function: $L(\theta)=\ln\left(\prod_{i} func(x_{i};\theta)\right)=\sum_{i} \ln(func(x_{i};\theta))$
<br>where the $x_{i}$ are the elements of the data set data
<br>The value of $\theta \in$ p_range at which the likelihood function attains its maximum is returned as the best-fitting parameter value
<br>mle(func, data, p_range, acc=0.1)
<br>The meaning of each argument is as in the expression above
### DEMO 4-1: Fit the mean and variance of a normal distribution
```
from maysics.stats import mle
import numpy as np
from scipy.stats import norm
np.random.seed(100)
data = np.random.normal(0, 0.5, (100,))
def f(x, param):
return norm.pdf(x, loc=param[0], scale=param[1])
mle(f, data, [[-1, 1],[0.1, 1]])
```
```
# General setup, as explained earlier
import os
from pprint import pprint
from urllib3.util.retry import Retry
import requests
from requests.adapters import HTTPAdapter
PLANET_API_URL = 'https://api.planet.com/data/v1'
def setup_session(api_key=None):
"""
Initialize a requests.Session that handles Planet api key auth and retries.
:param str api_key:
A Planet api key. Will be read from the PL_API_KEY env var if not specified.
:returns requests.Session session:
A Session instance optimized for use with Planet's api.
"""
if api_key is None:
api_key = os.getenv('PL_API_KEY')
session = requests.Session()
session.auth = (api_key, '')
retries = Retry(total=5,
backoff_factor=0.2,
status_forcelist=[429])
session.mount('https://', HTTPAdapter(max_retries=retries))
return session
session = setup_session() # Or pass in an api key if the environment variable isn't set
```
Yeah, but how do I download data?
---------------------------------------------------
Okay, we've talked a lot about how to search for data, so let's finally start retrieving data.
Downloading data in the Planet API is a 2-step process. We need to first "activate" the asset before we can download it.
Behind the scenes, this is because we don't store what you download in its full, ready-to-use form. We store a much lower-level form of the data that can be processed into multiple different asset types. However, this takes a few minutes of compute time.
Let's work with a scene you should have permission to download assets for:
```
scene = '20180227_181938_1042'
itemtype = 'PSScene4Band'
url = '{}/item-types/{}/items/{}'.format(PLANET_API_URL, itemtype, scene)
response = session.get(url)
response.raise_for_status()
info = response.json()
# Just for fun, let's display the thumbnail:
from IPython.display import Image
Image(session.get(info['_links']['thumbnail']).content)
# And now let's look at the response we got from the API in more detail:
pprint(info)
```
Hopefully, you'll see `download` permissions in the `_permissions` section.
Activation
---------------
We looked at what the `assets` url (in `_links`) returned briefly in section 2. Let's look at it in more detail now.
```
assets_url = info['_links']['assets']
res = session.get(assets_url)
res.raise_for_status()
assets = res.json()
pprint(assets)
```
Okay, that's a bit hard to read... Let's take a look at the structure for a single asset.
```
pprint(assets['analytic_sr'])
```
Note that we see `'download'` in the `_permissions` list. Also note the `status` -- it's "inactive". This means we need to activate the scene before we can download it.
To activate the scene, follow the `activate` url in the `_links` section:
```
response = session.get(assets['analytic_sr']['_links']['activate'])
response.raise_for_status()
# Let's have a closer look at the actual response code
print(response.status_code)
```
Okay, we've requested that the scene be activated. Behind the scenes, a bunch of work is happening to turn the low-level data we store into something usable. Why did we inspect the response code, though?
### Activation Response Codes
After hitting an activation url, you should get a response code back from the API:
* **`202`** - The request has been accepted and the activation will begin shortly.
* **`204`** - The asset is already active and no further action is needed.
* **`401`** - The user does not have permissions to download this file.
You can also get the same information by inspecting the `status` of the asset. The categories are `inactive`, `activating`, and `active`.
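As a small illustration (a hypothetical helper, not part of Planet's API or client libraries), these codes can be turned into readable messages:
```
def describe_activation(status_code):
    """Map an activation response code to a readable message (hypothetical helper)."""
    messages = {
        202: 'Activation request accepted; processing will begin shortly.',
        204: 'Asset is already active; nothing further to do.',
        401: 'You do not have permission to download this asset.',
    }
    return messages.get(status_code, 'Unexpected response code: {}'.format(status_code))

print(describe_activation(response.status_code))
```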
Waiting
-----------
We can't download the scene until it's active, as indicated by a 204 response code or `status: active`. We could just wait around a few minutes, but let's automate the waiting. (In other words, let's poll the api...)
```
import time
asset_name = 'analytic_sr'
assets_url = info['_links']['assets'] # From our earlier request
# We could also construct this if needed
print(assets_url)
while True:
# Send a request to the assets url for this item + scene:
res = session.get(assets_url)
res.raise_for_status()
assets = res.json()
if assets[asset_name]['status'] == 'active':
print("Asset is active and ready to download")
break
else:
time.sleep(0.5)
# Print the asset data
pprint(assets[asset_name])
```
Okay! Great! We're ready to download now!
Downloading
-------------------
Note the `location` that's now in the response for our asset. That's what we'll follow to download the data. However, we'll also need to take a look at its headers to determine what filename we should use.
```
download_url = assets[asset_name]['location']
# We don't want to download the full thing all at once, so we'll stream it
response = session.get(download_url, stream=True)
response.raise_for_status()
pprint(response.headers)
```
There's a lot of clutter there, but it's standard if you're familiar with what HTTP response headers look like. If you're not, the parts we want to look at are `Content-Disposition` and possibly `Content-Type` and `Content-Length`.
The `Content-Disposition` header tells us what name we should save the file as (by default, anyway -- you can do whatever you'd like). The others let us know what type of file it is (in very broad terms) and its size (in bytes).
```
disposition = response.headers['Content-Disposition']
filetype = response.headers['Content-Type']
size = response.headers['Content-Length']
mb_size = int(size) / 1024**2
print('This is a {:.1f}MB {} file'.format(mb_size, filetype))
```
Let's take a bit of a closer look at the content disposition header.
```
print(disposition)
```
To extract the actual filename, we'll use a regex. If you're not familiar with regular expressions, this will find what's inside the quotes with `filename="foo"`:
```
import re
filename = re.findall(r'filename="(.+)"', disposition)[0]
print(filename)
```
Okay! On to actually downloading the file!
Remember the file size? This is over 100MB. Therefore, it's best not to download it at once. Instead, we'll download it in chunks. Fortunately, python has some builtin functions that can do this for us so we don't need to iterate over 1KB at a time.
Let's repeat what we did before to start bringing things together.
```
import shutil
download_url = assets[asset_name]['location']
# We don't want to download the full thing all at once, so we'll stream it
response = session.get(download_url, stream=True)
response.raise_for_status()
disposition = response.headers['Content-Disposition']
filename = re.findall(r'filename="(.+)"', disposition)[0]
# shutil.copyfileobj will download this in chunks. You can do it manually if you prefer.
with open(filename, 'wb') as outfile:
shutil.copyfileobj(response.raw, outfile)
del response
```
Let's double-check that it really did download the full thing (The `!` escapes to a shell in a notebook):
```
! ls -lh *.tif
```
We'll skip working with the file -- that's for other workshops. Let's move on to tying this back in to the searches we were doing earlier.
---
Activating and Downloading Search Results
--------------------------------------
We've spent a lot of time laying the low-level framework. Now let's walk through one of the most common tasks you'd want to use our api for. We'll query for scenes and download them.
Again, note that there are lots of higher-level tools to do this more easily: e.g. `planet data download` in the cli tool or interactively using https://planet.com/explorer. The point of this workshop is to show the API that those tools are using "under-the-hood". You can do all of what we're about to do with a couple of clicks in explorer or a single command in the cli tool.
One other note, the Orders API also helps automate this process. If you have a set of scenes and assets you want to download, it will activate them and roll them up into a single zip file for you. It's particularly useful if you want to upload the scenes into an AWS or GCS bucket, as it can do that for you instead of delivering a zip file. However, that's for another workshop...
For this workshop, we're going to stick with the Data API. In this case, there are 4 steps:
1. Search for scenes
2. Activate the asset(s) you want for those scenes
3. Wait for them to become active
4. Download the files for each scene/asset.
```
def search(geom, start_date, end_date, item_type, asset):
"""
Query the Planet api for scenes overlapping an AOI within a TOI that
have the specified asset.
:param dict geom: A geojson geometry
:param str start_date: An iso-8601-formatted timestamp in UTC (earliest scenes)
:param str end_date: An iso-8601-formatted timestamp in UTC (latest scenes)
:param str item_type: A single item type name (e.g. PSScene4Band)
:param str asset: The asset name we're going to use.
"""
# First let's filter for scenes where we have download permissions. This
# serves two purposes: 1) avoid scenes that do not have the asset we want,
# and 2) avoid scenes we don't have access to.
perm_filter = {
"type": "PermissionFilter",
"config": ["assets.{}:download".format(asset)]
}
# Then we'll filter for our AOI
geom_filter = {
"type": "GeometryFilter",
"field_name": "geometry",
"config": geom
}
# And the TOI
date_filter = {
"type": "DateRangeFilter",
"field_name": "acquired",
"config": {
"gt": start_date,
"lte": end_date
}
}
# Then combine the three
and_filter = {
"type": "AndFilter",
"config": [perm_filter, geom_filter, date_filter]
}
request = {
"item_types" : [item_type],
"filter" : and_filter
}
resp = session.post("{}/quick-search".format(PLANET_API_URL), json=request)
resp.raise_for_status()
body = resp.json()
for item in body['features']:
yield item
next_url = body['_links'].get('_next')
while next_url:
response = session.get(next_url)
response.raise_for_status()
body = response.json()
next_url = body['_links'].get('_next')
for item in body['features']:
yield item
```
Your accounts for this exercise should have access to download data in California from October 2017 through the end of March 2018. Feel free to substitute different AOIs and dates here! (Go to http://geojson.io to draw a different geometry if you'd like.)
For now, we'll just print the names of the scenes we find:
```
geom = {
"type": "Polygon",
"coordinates": [
[
[
-119.68505859375,
35.131702190832634
],
[
-119.60248947143555,
35.06611364116525
],
[
-119.57914352416992,
35.07679117524852
],
[
-119.6714973449707,
35.14026553479837
],
[
-119.68505859375,
35.131702190832634
]
]
]
}
asset = 'visual'
results = search(geom, '2018-01-01T00:00:00Z', '2018-01-10T00:00:00Z', 'PSScene3Band', asset)
# That's a generator. Let's expand it to a list to make it easier to reuse these results later
results = list(results)
for feature in results:
print(feature['id'])
```
Okay, let's move on to the next step - Activation of the results:
```
def _fetch_assets(feature):
# Fetch the assets section
assets_url = feature['_links']['assets']
resp = session.get(assets_url)
resp.raise_for_status()
return resp.json()
def activate(results, asset_name):
"""Activate the results of a search."""
for feature in results:
assets = _fetch_assets(feature)
if assets[asset_name]['status'] == 'inactive':
response = session.get(assets[asset_name]['_links']['activate'])
response.raise_for_status()
# This will be fairly quick...
activate(results, asset)
```
Now we need to wait on things to become active. This can take awhile.
```
def wait_for_active(results, asset_name):
"""Wait for all results in a search to become active"""
active = [False]
while not all(active):
# Getting just a touch fancier with Python: these are list comprehensions (compact for loops)
statuses = [_fetch_assets(item)[asset_name]['status'] for item in results]
active = [item == 'active' for item in statuses]
# May take awhile...
wait_for_active(results, asset)
```
And now we'll download the files! This might take a bit... We could do this asynchronously as well. It's only a few more lines of code, but it requires a bit more familiarity with Python, so we'll leave parallelizing out of the main walkthrough (one possible approach is sketched after the next cell).
```
def download(results, asset_name):
for feature in results:
assets = _fetch_assets(feature)
download_url = assets[asset_name]['location']
# We don't want to download the full thing all at once, so we'll stream it
response = session.get(download_url, stream=True)
response.raise_for_status()
# Figure out what we should call the local file
disposition = response.headers['Content-Disposition']
filename = re.findall(r'filename="(.+)"', disposition)[0]
print('Downloading {}'.format(filename))
# Download in chunks.
with open(filename, 'wb') as outfile:
shutil.copyfileobj(response.raw, outfile)
del response
yield filename
files = list(download(results, asset))
```
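For reference, here's one way the download step could be parallelized with a thread pool (a sketch built on the helpers above, not part of the official tooling; the worker count is arbitrary, and a production version might prefer one session per thread since a shared `requests.Session` is not guaranteed to be thread-safe):
```
from concurrent.futures import ThreadPoolExecutor

def download_one(feature, asset_name):
    """Download a single asset for one search result and return the local filename."""
    assets = _fetch_assets(feature)
    response = session.get(assets[asset_name]['location'], stream=True)
    response.raise_for_status()
    disposition = response.headers['Content-Disposition']
    filename = re.findall(r'filename="(.+)"', disposition)[0]
    with open(filename, 'wb') as outfile:
        shutil.copyfileobj(response.raw, outfile)
    return filename

# Activate and wait as before, then download up to 4 assets concurrently
with ThreadPoolExecutor(max_workers=4) as pool:
    parallel_files = list(pool.map(lambda item: download_one(item, asset), results))
```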
|
github_jupyter
|
```
import argparse
import numpy as np
import os
import random
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as sched
import torch.backends.cudnn as cudnn
import torch.utils.data as data
import torchvision
import torchvision.transforms as transforms
import util
from models import Glow
from tqdm import tqdm
import matplotlib.pyplot as plt
device = 'cuda'
transform_test = transforms.Compose([
transforms.ToTensor()
])
dataset = torchvision.datasets.MNIST(root='data', train=False, download=True, transform=transform_test)
dataset.targets = torch.tensor(dataset.targets)
testloader = data.DataLoader(dataset, batch_size=64, shuffle=False, num_workers=8)
loss_fn = util.NLLLoss().to(device)
loss_meter = util.AverageMeter()
net = Glow(num_channels=64,num_levels=1,num_steps=8)
net = net.to(device)
net = torch.nn.DataParallel(net, [0])
# Load checkpoint.
print('Resuming from checkpoint at ckpts/best.pth.tar...')
assert os.path.isdir('ckpts'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('ckpts/best.pth.tar')
net.load_state_dict(checkpoint['net'])
best_loss = checkpoint['test_loss']
start_epoch = checkpoint['epoch']
@torch.no_grad()
def sample(net, batch_size, device):
"""Sample from RealNVP model.
Args:
net (torch.nn.DataParallel): The RealNVP model wrapped in DataParallel.
batch_size (int): Number of samples to generate.
device (torch.device): Device to use.
"""
z = torch.randn((batch_size, 1, 28, 28), dtype=torch.float32, device=device)
print(z.shape)
x, _ = net(z, reverse=True)
x = torch.sigmoid(x)
return x
z = torch.ones((1,1,28,28), dtype=torch.float32, device=device)
print(z.shape)
x, _ = net(z, reverse=True)
x = torch.sigmoid(x)
plt.imshow(x.cpu()[0].detach().numpy()[0,:,:])
images = sample( net,10,'cuda')
plt.imshow(images.cpu()[1].numpy()[0,:,:])
images = []
print(images)
for i in range(10):
images.append(dataset.data[dataset.targets == i ].to(device, dtype=torch.float32))
# dataset.targets= dataset.targets[idx]
# dataset.data = dataset.data[idx].to(device, dtype=torch.float32)
print(images[9].shape)
x = images[2][:100]/255
x = torch.unsqueeze(x,1)
print(x.shape)
z , _ = net(x)
plt.imshow(z.cpu().detach()[80].numpy()[0,:,:])
x = images[9][:10]/255
x = torch.unsqueeze(x,1)
print(x.shape)
z , _ = net(x)
fig, axes= plt.subplots(nrows=10, ncols=2,figsize=(50,50))
for i in range(10):
# plt.
# plt.subplot(10,2,2*i+1)
axes[i][0].imshow(images[9].cpu().detach()[i].numpy())
# plt.subplot(10,2,2*i+2)
axes[i][1].imshow(z.cpu().detach()[i].numpy()[0,:,:])
mean9 = torch.mean(images[9][:10],(0))
print(mean9.shape)
plt.imshow(z.cpu().detach()[1].numpy()[0,:,:])
plt.imshow(mean9.cpu().detach().numpy())
inp = mean9.unsqueeze(0).unsqueeze(0)
z , _ = net(inp/255)
plt.imshow(z.cpu().detach()[0].numpy()[0,:,:])
z.max()
with tqdm(total=len(testloader.dataset)) as progress_bar:
for x, _ in testloader:
x = x.to(device)
z, sldj = net(x, reverse=False)
Zavg = z.mean(dim=0)
loss = loss_fn(z, sldj)
loss_meter.update(loss.item(), x.size(0))
progress_bar.set_postfix(nll=loss_meter.avg,
bpd=util.bits_per_dim(x, loss_meter.avg))
progress_bar.update(x.size(0))
x = dataset.data[:100]/255
x.shape
# Add the channel dimension the model expects before moving to the device
x = x.unsqueeze(1).to(device)
z, sldj = net(x, reverse=False)
```
|
github_jupyter
|
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D , MaxPool2D , Flatten , Dropout , BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
from tensorflow.keras.callbacks import ReduceLROnPlateau
import cv2
import os
labels = ['PNEUMONIA', 'NORMAL']
img_size = 150
data = np.load('../data.npy', allow_pickle=True)
# Shuffle dataset
np.random.seed(1337)
np.random.shuffle(data)
train, val, test = np.split(data, [int(.75*len(data)), int(.80*len(data))])
x_train = []
y_train = []
x_val = []
y_val = []
x_test = []
y_test = []
for feature, label in train:
x_train.append(feature)
y_train.append(label)
for feature, label in test:
x_test.append(feature)
y_test.append(label)
for feature, label in val:
x_val.append(feature)
y_val.append(label)
# Normalize data. Pixels are [0, 255], so dividing will place them into a [0, 1] range.
x_train = np.array(x_train) / 255
x_val = np.array(x_val) / 255
x_test = np.array(x_test) / 255
# Reshape the arrays to (N, 150, 150, 1), adding the channel axis the CNN expects
x_train = x_train.reshape(-1, img_size, img_size, 1)
y_train = np.array(y_train)
x_val = x_val.reshape(-1, img_size, img_size, 1)
y_val = np.array(y_val)
x_test = x_test.reshape(-1, img_size, img_size, 1)
y_test = np.array(y_test)
# Data augmentation to prevent overfitting and to help handle the class imbalance in the dataset
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range = 30, # randomly rotate images in the range (degrees, 0 to 180)
zoom_range = 0.2, # Randomly zoom image
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip = True, # randomly flip images
vertical_flip=False) # randomly flip images
datagen.fit(x_train)
baseline = tf.keras.models.load_model('../saved_models/baseline')
attention = tf.keras.models.load_model('../saved_models/my_model')
from utils.vizgradcam.gradcam import VizGradCAM
ids = [65, 230, 422, 648, 961]
unique, counts = np.unique(y_test, return_counts=True)
print(np.asarray((unique, counts)).T)
nums = len(ids)
fig, ax = plt.subplots(3, nums, figsize=(13.7, 8.3))
for column in range(nums):
img = x_test[ids[column]]
label = y_test[ids[column]]
label = 'Pneumonia' if label == 0 else 'Normal'
    ax[0][column].imshow(img.squeeze(), cmap='gray')
ax[0][column].set_xticks([])
ax[0][column].set_yticks([])
gradcambase = VizGradCAM(baseline, img, plot_results=False, return_scan=True)
ax[1][column].imshow(gradcambase)
ax[1][column].set_xticks([])
ax[1][column].set_yticks([])
gradcamatt = VizGradCAM(attention, img, plot_results=False, return_scan=True)
ax[2][column].imshow(gradcamatt)
ax[2][column].set_xticks([])
ax[2][column].set_yticks([])
ax[0][column].set(title=f'ID: {ids[column]}, {label}', ylabel='Original')
ax[0][column].label_outer()
ax[1][column].set(ylabel='Baseline')
ax[1][column].label_outer()
ax[2][column].set(ylabel='Attention')
ax[2][column].label_outer()
plt.subplots_adjust(wspace=0.03, hspace=0)
plt.savefig('comparison')
```
|
github_jupyter
|
**Import libraries**
```
import os, flopy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import flopy.utils.binaryfile as bf
from ipywidgets import interact
import shutil
```
**FLOW MODEL**
**Load the Flow Model Results**
```
model_ws='../4_OUT/R_SIM_KRIG'
modelname='Modelo_Flujo'
exe_name= '../1_SOLVER/mf2005.exe'
mf = flopy.modflow.Modflow.load(modelname+'.nam', exe_name=exe_name, model_ws=model_ws)
```
**Horizontal Hydraulic Heads**
```
nlay,nrow,ncol = 30,1,100
headobj = bf.HeadFile(model_ws + '/'+ modelname + '.hds')
times = headobj.get_times()
cbb = bf.CellBudgetFile(model_ws + '/'+ modelname + '.cbc')
for iplot, time in enumerate(times):
#print('*****Processing time: ', time)
head = headobj.get_data(totim=time)
#Extract flow right face and flow front face
qx = cbb.get_data(text='FLOW RIGHT FACE', totim=time)[0]
qy = np.zeros((nlay,nrow,ncol),dtype=float)
qz = cbb.get_data(text='FLOW LOWER FACE', totim=time)[0]
#head_1=head.reshape(head.shape[0],-1)
#np.savetxt(model_ws+'/'+'ALTURA.txt',head_1,fmt='%18e')
    # Active and inactive cells
fig = plt.figure(figsize=(22, 5))
ax = fig.add_subplot(1, 1, 1)
# Next we create an instance of the ModelMap class
modelxsect = flopy.plot.PlotCrossSection(model=mf, line={'Row': 0})
fb = modelxsect.plot_fill_between(head, colors=['brown', 'cyan'])
#patches = modelxsect.csplot_ibound(head=head)
patches = modelxsect.plot_bc('CHD', head=head)
linecollection = modelxsect.plot_grid()
t = ax.set_title('CELDAS ACTIVAS / NO ACTIVAS - ESTIMACION CAMPO SUAVE (KRIGING)', fontsize=20)
#plt.savefig(model_ws+'/'+'imagenes'+'/'+'Celdas_Activas.png')
plt.show()
```
**Hydraulic Head Map, Vertical Section**
```
# First step is to set up the plot
fig = plt.figure(figsize=(22,5))
ax = fig.add_subplot(1, 1, 1)
levels = np.arange(300, 501, 10)
# Next we create an instance of the ModelMap class
modelxsect = flopy.plot.PlotCrossSection(model=mf, line={'Row': 0})
contour_set = modelxsect.contour_array(head, masked_values=[999.,-2.00000E+20], levels=levels, colors='w')
plt.clabel(contour_set, fmt='%5.1f', colors='w', fontsize=14)
# Then we can use the plot_grid() method to draw the grid
# The return value for this function is a matplotlib LineCollection object,
# which could be manipulated (or used) later if necessary.
hv = modelxsect.plot_array(head, head=head, masked_values=[999.00, -2.00000E+20])
patches = modelxsect.plot_ibound(head=head)
linecollection = modelxsect.plot_grid()
t = ax.set_title('SECCION VERTICAL MODELO DE FLUJO - ESTIMACION CAMPO SUAVE (KRIGING)', fontsize=20)
fig.colorbar(hv, orientation='vertical', format='%1.1f')
plt.savefig(model_ws+'/'+'imagenes'+'/'+'M_Flujo.png')
plt.show()
```
**Piezometric Head Contours**
```
levels = np.arange(300, 520, 10)
fig = plt.figure(figsize=(22, 7))
modelxsect = flopy.plot.PlotCrossSection(model=mf, line={'row': 0})
# contour array and plot ibound
ct = modelxsect.contour_array(head, masked_values=[-2.0000E+20], head=head, levels=levels, linewidths=1.5)
cts = modelxsect.plot_surface(head[21], masked_values=[-2.0000E+20], color='blue', linewidth=1)
#linecollection = modelxsect.plot_grid()
pc = modelxsect.plot_ibound(head=head)
plt.clabel(ct, fmt='%1.1f', colors='b', fontsize=18)
plt.title('contour_array() and plot_surface()')
#cb = plt.colorbar(ct, shrink=0.75)
```
**Flow Direction**
```
# Create the heads figure
fig =plt.figure(figsize=(18,5))
ax = fig.add_subplot(1, 1, 1)
modelxsect = flopy.plot.PlotCrossSection(model=mf, line={'Row':0})
pc = modelxsect.plot_array(head, masked_values=[999.00, -2.00000E+20], head=head, alpha=1.0)
#hv = modelxsect.plot_array(head, head=head, masked_values=[999.00, -2.00000E+20])
patches = modelxsect.plot_ibound(head=head)
linecollection = modelxsect.plot_grid()
#plot the surface and grid (water table)
wt= modelxsect.plot_surface(head[0], masked_values=[-1.00000E+30], color='blue',lw=2.5)
linecollection = modelxsect.plot_grid()
quiver =modelxsect.plot_discharge(qx, -qz, head=head, hstep=15, normalize=True,
color='w', scale=10, headwidth=2.5, headlength=2.5, headaxislength=2.5, zorder=4)
#modelxsect.plot_vector(qx, qy, -qz, color="white", kstep=1., hstep=1.)
#plt.colorbar(wt, shrink=0.75)
```
**TRANSPORT MODEL**
**Load the Transport Model Results**
```
namemt3d='transModel'
mt_model = flopy.mt3d.Mt3dms.load(namemt3d+'.nam', model_ws=model_ws, version='mt3d-usgs',
exe_name='../1_SOLVER/mt3d-usgs_1.1.0_64.exe', modflowmodel=mf)
```
**Load the Transport Output Files**
```
fname = os.path.join(model_ws +'/'+'MT3D001.UCN')
ucnobj = flopy.utils.UcnFile(fname)
times = ucnobj.get_times()
conc = ucnobj.get_alldata()
fname = os.path.join(model_ws +'/'+'MT3D001.MAS')
mvt = mt_model.load_mas(fname)
Conc_1=conc.reshape(head.shape[0],-1)
np.savetxt(model_ws+'/'+'Concentraciones.txt',Conc_1,fmt='%18e')
```
**Generate Interactive Plots**
```
hk=mf.lpf.hk.array
@interact(Conc=([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]))
def f(Conc):
fig =plt.figure(figsize=(20,4))
ax = fig.add_subplot(1, 1, 1)
csa = modelxsect.plot_array(np.log(hk),cmap='gist_rainbow')
mx = flopy.plot.PlotCrossSection(ax=ax, model=mf, line={'row':0})
mx.plot_ibound()
mx.plot_grid(color='.5', alpha=0.2)
c_s = mx.plot_array(conc[Conc], masked_values=[-1.00000E+30])
plt.title('Concentracion de Cadmio t = {} sec'.format(times[Conc]))
fig.colorbar(c_s, orientation='vertical', format='%1.2f')
plt.show()
t = [(times[2]/(3600*24)), (times[4]/(3600*24)), (times[6]/(3600*24*365)), (times[8]/(3600*24*365)),
     (times[10]/(3600*24*365)), (times[12]/(3600*24*365))]
t
time = ['10.0 días', '50.0 días', '0.52 años', '3.0 años', '7.0 años', '35.0 años']
time
```
**Generate Plots of Hydraulic Conductivity, Piezometric Head, and Concentrations at Different Times**
```
a = mf.lpf.hk.array
fig = plt.figure(figsize=(20,3))
ax = fig.add_subplot(1,2,1)
xsect =flopy.plot.PlotCrossSection(model=mf, line={'Row': 0})
csa = xsect.plot_array(np.log(a),cmap='jet')#'gist_rainbow')
#patches = xsect.plot_ibound()
#linecollection=xsect.plot_grid()
cb = plt.colorbar(csa, shrink =0.75)
t = ax.set_title('HETEROGENEIDAD DE LA CONDUCTIVIDAD HIDRULICA - KRIGING', fontsize=13)
ax = fig.add_subplot(1, 2, 2)
levels = np.arange(300, 501, 10)
# Next we create an instance of the ModelMap class
modelxsect = flopy.plot.PlotCrossSection(model=mf, line={'Row': 0})
contour_set = modelxsect.contour_array(head, masked_values=[999.,-2.00000E+20], levels=levels, colors='w')
plt.clabel(contour_set, fmt='%5.1f', colors='w', fontsize=13)
# Then we can use the plot_grid() method to draw the grid
# The return value for this function is a matplotlib LineCollection object,
# which could be manipulated (or used) later if necessary.
hv = modelxsect.plot_array(head, head=head, masked_values=[999.00, -2.00000E+20])
patches = modelxsect.plot_ibound(head=head)
linecollection = modelxsect.plot_grid(color='.1', alpha=0.1)
t = ax.set_title('ESTIMACION CAMPO SUAVE (KRIGING)', fontsize=13)
fig.colorbar(hv, orientation='vertical', format='%1.1f')
fig = plt.figure(figsize=(20,2.5))
for j in range (0,3):
ax = fig.add_subplot(1, 3, j+1)
mx = flopy.plot.PlotCrossSection(ax=ax, model=mf, line={'row':0})
mx.plot_ibound()
#mx.plot_grid(color='.5', alpha=0.2)
#csa = xsect.plot_array(np.log(a),cmap='gist_rainbow')
c_s = mx.plot_array(conc[(j+1)*2], masked_values=[-1.00000E+30])
plt.title('CONCENTRACION DE CADMIO t = {}'.format(time[j]))
fig.colorbar(c_s, orientation='vertical', format='%1.2f')
fig = plt.figure(figsize=(20,2.5))
for j in range (0,3):
ax = fig.add_subplot(1, 3, j+1)
mx = flopy.plot.PlotCrossSection(ax=ax, model=mf, line={'row':0})
mx.plot_ibound()
#mx.plot_grid(color='.5', alpha=0.2)
c_s = mx.plot_array(conc[(j+4)*2], masked_values=[-1.00000E+30])
plt.title('CONCENTRACION DE CADMIO t = {}'.format(time[j+3]))
fig.colorbar(c_s, orientation='vertical', format='%1.2f')
#plt.savefig(model_ws+'/'+'imagenes'+'/'+'M_TRANSPORTE.png')
plt.show()
```
|
github_jupyter
|
# Atmosphere Numerical and Data Analysis Answers
```
import numpy as np
import matplotlib.pyplot as plt
from SciServer import CasJobs
from SciServer import Authentication
from io import StringIO
from astropy.table import Table
import requests
```
## 1. Differential chromatic aberration
*Based on answer by Dou Liu*
The equations in [Stone (1996)](https://ui.adsabs.harvard.edu/#abs/1996PASP..108.1051S/abstract) relate index of refraction to zenith angle, and give the dependence of index of refraction on local conditions and wavelength.
They claim this to be accurate under most conditions to better than 0.150 arcsec for zenith angles less than 75 deg, and to better than 0.010 arcsec for zenith angles less than 65 deg.
These equations allow you to relate the *apparent* zenith angle (which is what the star would be at if there were no atmosphere) to the *observed* zenith angle (which is what the star should be at accounting for the atmosphere). The observed zenith angle is always smaller than the apparent zenith angle. We define the refraction as the apparent minus the observed.
Note that astropy, starlink, IRAF, and other astronomical tools have built into them refraction calculators. Those other tools are the ones you should actually use!
$\kappa$ is the ratio of the gravity at the observing site to the sea-level gravity at the earth's equator.
$$
\kappa=\frac{g_0}{g}=1+0.005302\sin^2\phi-5.83\times 10^{-6}\sin^2(2\phi)-3.15\times10^{-7}h
$$
```
def kappa(phi, h):
    """Ratio of gravity at the observatory to sea-level gravity at the equator
    Parameters
    ----------
    phi : np.float32
        latitude of observatory (deg)
    h : np.float32
        height of observatory from sea level (meters)
    """
    phirad = phi * np.pi / 180.  # the trigonometric terms expect radians
    k = 1 + 5.302 / 1E3 * np.sin(phirad)**2 - 5.83 / 1E6 * np.sin(2. * phirad)**2 - 3.15 / 1E7 * h
    return k
```
The finite height of the atmosphere is accounted for with
$$
\beta=\frac{H}{R_\text{earth}}=\frac{1}{R_\text{earth}}\int_0^\infty e^{-h/h_0}\,dh=\frac{0.001254}{273.15}T
$$
```
def beta(T):
"""Scaling of temperature
Parameters
----------
T : np.float32
temperature in K
"""
return(4.5908 / 1E6 * T)
```
The refraction in arcsec as a function of wavelength, whose chromatic dependence gives the differential chromatic refraction, is
$$R(\lambda)\simeq \kappa(n(\lambda)-1)(1-\beta)\tan z-\kappa(1-n(\lambda))\left(\beta-\frac{1}{2}(n(\lambda)-1)\right)\tan^3 z$$
where $z$ is the zenith angle and $n$ is the index of refraction.
This formula accounts for the spherical shape of the Earth and atmosphere, which is why the gravity and the height enter the calculation.
```
def refraction(lam=5500., T=285., Ps=1000., RH=0.2,
phi=0., h=0., z=0., n=None):
"""Refraction calculation
Parameters
----------
lam : np.float32
wavelength (A) (default 5500.)
T : np.float32
temperature in K (default 285.)
Ps : np.float32
pressure (mbar) (default 1000.)
RH : np.float32
relative humidity (default 0.2)
phi : np.float32
latitude (deg) (default 0.)
h : np.float32
height from sea level (default 0.)
z : np.float32
zenith angle (deg) (default 0.)
n : np.float32
index of refraction (by default sets with local_ref())
Returns:
-------
R : np.float32
refraction in arcsec (difference of apparent to observed zenith angle)
"""
if(n is None):
n = local_ref(lam, T, Ps, RH)
zrad = z * np.pi / 180.
k = kappa(phi, h)
b = beta(T)
R = k * (n - 1) * (1 - b) * np.tan(zrad) - k * (1 - n) * (b - (n - 1) / 2) * np.tan(zrad)**3
R = R * 180. / np.pi * 3600.
return R
```
The index of refraction of air $n$ is computed accurately with the relations
$$
n=1+10^{-8}\left[\left(2371.34+\frac{683939.7}{130-\sigma^2}+\frac{4547.3}{38.9-\sigma^2}\right)D_s
+\left(6487.31+58.058\sigma^2-0.71150\sigma^4+0.08851\sigma^6\right)D_w\right]\\
P_w=RH\times10^{-4}\times\exp\left(77.3450+0.0057T-7235.0/T\right)/T^{8.2}\\
D_s=\left[1+(P_s-P_w)\left(57.90\times10^{-8}-\frac{9.3250\times10^{-4}}{T}+\frac{0.25844}{T^2}\right)\right]\frac{P_s-P_w}{T}\\
D_w=\left[1+P_w(1+3.7\times10^{-4}P_w)\left(-2.37321\times10^{-3}+\frac{2.23366}{T}-\frac{710.792}{T^2}+\frac{7.75141\times10^4}{T^3}\right)\right]\frac{P_w}{T}
$$
where $\sigma$ is the wave number
$$
\sigma=\frac{10^4}{\lambda}
$$
where $D_s$ and $D_w$ are the density factors for dry air and water vapor.
```
def local_ref(lam=5500., T=285., Ps=1000., RH=0.2):
"""Local refraction index
Parameters
----------
lam : np.float32
wavelength (A) (default 5500.)
T : np.float32
temperature in K (default 285.)
Ps : np.float32
pressure (mbar) (default 1000.)
RH : np.float32
relative humidity (default 0.2)
Returns:
-------
n : np.float32
        index of refraction of air at the given conditions
"""
sigma=10**4/lam
Pw=RH/1E4*np.exp(77.3450+0.0057*T-7235.0/T)/T**8.2
Ds=(1+(Ps-Pw)*(57.90/1E8-9.3250*1E-4/T+0.25844/T**2))*(Ps-Pw)/T
Dw=(1+Pw*(1+3.7/1E4*Pw)*(-2.37321/1E3+2.23366/T-710.792/T**2+7.75141*10**4/T**3))*Pw/T
    # note the sigma**2 in the first resonance term, matching the dispersion relation above
    n=1+((2371.34+683939.7/(130-sigma**2)+4547.3/(38.9-sigma**2))*Ds
+(6487.31+58.058*sigma**2-0.71150*sigma**4+0.08851*sigma**6)*Dw)*1E-8
return n
```
It is instructive to compare this refraction to what we would derive just from Snell's law for a plane-parallel atmosphere. This is much simpler to derive. We start with:
$$
n_{\rm space} \sin z_{\rm app} = n_{\rm obs} \sin z_{\rm obs}
$$
and noting that $n_{\rm space} = 1$:
$$
z_{\rm obs} = \sin^{-1} \left(\frac{\sin z_{\rm app}}{n_{\rm obs}}\right)
$$
Note that in the plane-parallel case, only the index of refraction local to the observatory matters.
It is easy to show that for $n \approx 1$:
$$
R = z_{\rm app} - z_{\rm obs} \approx (n - 1) \tan z_{\rm app}
$$
To see this, write $z_{\rm obs} = z_{\rm app} - R$ with $R$ small, expand $\sin(z_{\rm app} - R) \approx \sin z_{\rm app} - R\cos z_{\rm app}$, and equate it to $\sin z_{\rm app}/n \approx \left[1 - (n-1)\right]\sin z_{\rm app}$; solving for $R$ gives the expression above. This shows why the leading term of Stone's equation looks the way that it does.
```
def refraction_pp(lam=5500., T=285., Ps=1000., RH=0.2,
z=0., n=None):
"""Refraction calculation in plane-parallel approximation
Parameters
----------
lam : np.float32
wavelength (A) (default 5500.)
T : np.float32
temperature in K (default 285.)
Ps : np.float32
pressure (mbar) (default 1000.)
RH : np.float32
relative humidity (default 0.2)
z : np.float32
zenith angle (deg) (default 0.)
n : np.float32
index of refraction (by default sets with local_ref())
Returns:
-------
R : np.float32
refraction in arcsec (difference of apparent to observed zenith angle)
"""
if(n is None):
n = local_ref(lam, T, Ps, RH)
zrad = z * np.pi / 180.
zobsrad = np.arcsin(np.sin(zrad) / n)
R = (zrad - zobsrad) * 180. / np.pi * 3600.
return R
```
Just to check, we can look at the index of refraction as a function of wavelength. $n-1$ is very close to zero, of order $10^{-4}$, and importantly it changes significantly with wavelength, getting smaller toward longer wavelengths. This is the source of the chromatic effects. Note that the chromatic variation in this range is about $10^{-2}$ of the total refractivity $n-1$.
```
nlam = 1000
lamstart = 3500.
lamend = 20000.
lam = lamstart + (lamend - lamstart) * np.arange(nlam) / np.float32(nlam - 1)
n = local_ref(lam=lam)
plt.plot(lam, n - 1.)
plt.xlabel('Wavelength (Ang)')
plt.ylabel('n - 1')
```
Now let us look at the refraction as a function of zenith angle. We can see a couple of things. First, it is a pretty large effect: the observed images are moved many arcseconds from their apparent positions. Second, the plane-parallel approximation is pretty good, staying within a fraction of an arcsec until $z>40$ deg (a quick numerical check follows the plot below).
```
nz = 100
z = 75. * np.arange(nz) / np.float32(nz)
Rpp = refraction_pp(z=z)
R = refraction(z=z)
fig, ax = plt.subplots(1, 2, figsize=(9, 4))
ax[0].plot(z, R, color='red', label='Spherical')
ax[0].plot(z, Rpp, color='black', label='Plane-parallel')
ax[0].set_xlabel("Zenith angle (deg)")
ax[0].set_ylabel("Refraction (arcsec)")
ax[0].legend()
ax[1].plot(z, R - Rpp)
ax[1].set_xlabel("Zenith angle (deg)")
ax[1].set_ylabel("Spherical minus Plane-parallal (arcsec)")
```
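As a quick numerical check of the two points above, we can print the refraction and the plane-parallel error at a few zenith angles. This is a small illustrative cell that simply reuses the `refraction` and `refraction_pp` functions defined earlier; the sample zenith angles are arbitrary choices.
```
# Spot-check the size of the refraction and the plane-parallel error (illustrative cell)
for zcheck in (30., 45., 60.):
    r_sph = refraction(z=zcheck)
    r_pp = refraction_pp(z=zcheck)
    print('z = {:2.0f} deg: R = {:6.2f} arcsec, spherical - plane-parallel = {:+.3f} arcsec'.format(
        zcheck, r_sph, r_sph - r_pp))
```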
Now let us look at airmass 1.2, which is zenith angle of $z \approx 33$ deg, and consider the refraction as a function of wavelength.
The differential refraction across the optical and near-infrared bands is about 0.5 arcsec, not coincidentally about $10^{-2}$ of the total refraction effect (because this is the fractional variation of $n-1$ in that range). Between 4000 and 8000 Angstroms, the image shifts by about 0.25 arcsec, in the sense that the bluer light appears higher in the sky than the redder light. (A quick numerical check of this shift follows the plot below.)
Aside from a very slight overall difference of $\sim 0.02$ arcsec in the absolute refraction, the plane-parallel approximation captures the variation with wavelength very accurately.
```
z = 33.
Rpp = refraction_pp(z=z, lam=lam)
R = refraction(z=z, lam=lam)
fig, ax = plt.subplots(1, 2, figsize=(16, 4))
ax[0].plot(lam, R, color='red', label='Spherical')
ax[0].plot(lam, Rpp, color='black', label='Plane-parallel')
ax[0].set_xlabel("Wavelength (Ang)")
ax[0].set_ylabel("Refraction (arcsec)")
ax[0].legend()
ax[1].plot(lam, R - Rpp)
ax[0].set_xlabel("Wavelength (Ang)")
ax[1].set_ylabel("Spherical minus Plane-parallal (arcsec)")
```
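To put a number on the shift quoted above, we can difference the refraction at two wavelengths directly. Again, this is just an illustrative check using the `refraction` function from earlier; 4000 and 8000 Angstroms are the wavelengths mentioned in the text.
```
# Differential refraction between 4000 A and 8000 A at z = 33 deg (illustrative cell)
r_blue = refraction(z=33., lam=4000.)
r_red = refraction(z=33., lam=8000.)
print('R(4000 A) - R(8000 A) = {:.3f} arcsec at z = 33 deg'.format(r_blue - r_red))
```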
## 2. Plot sky brightness vs airmass for SDSS imaging observations
*Based on answer by David Mykytyn*
We first define a function to retrieve the data we are interested in for a given run.
```
def retrieve_camcol(run=756, camcol=3):
# We define the columns we want and their
columns = ('ra', 'dec', 'airmass_g', 'airmass_i', 'sky_g', 'sky_i', 'field')
dtypes= ('f8', 'f8', 'f4', 'f4', 'f4', 'f4', 'i4')
# Now define the query
query = """
SELECT {columns}
FROM Field
WHERE run = {run} and camcol = {camcol}
"""
query = query.format(columns=', '.join(list(columns)),
run=run, camcol=camcol)
# Execute the query (requires internet access)
responseStream = CasJobs.executeQuery(query, "DR12", format="dict")
# convert result into astropy table
result = responseStream['Result'][0]
data = list(map(list, zip(*result['Data'])))
fields = Table(data, names=columns, dtype=dtypes)
isort = np.argsort(fields['field'])
fields = fields[isort]
return(fields)
```
Now we define a function to plot the asked-for results.
```
def plot_sky(fields=None):
field = fields['field']
sky_g = 22.5 - 2.5 * np.log10(fields['sky_g'])
sky_i = 22.5 - 2.5 * np.log10(fields['sky_i'])
airmass_g = fields['airmass_g']
airmass_i = fields['airmass_i']
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
axes[0, 0].plot(field, sky_g)
axes[0, 0].set_xlabel('Field Number')
axes[0, 0].set_ylabel('g_band sky (mag/arcsec$^2$)')
axes[0, 1].plot(airmass_g, sky_g, '.')
axes[0, 1].set_xlabel('airmass g')
axes[0, 1].set_ylabel('g_band sky (mag/arcsec$^2$)')
axes[1, 0].plot(field, sky_i)
axes[1, 0].set_xlabel('Field Number')
axes[1, 0].set_ylabel('i_band sky (mag/arcsec$^2$)')
axes[1, 1].plot(airmass_i, sky_i, '.')
axes[1, 1].set_xlabel('airmass i')
axes[1, 1].set_ylabel('i_band sky (mag/arcsec$^2$)')
```
First, let us consider a run that turns out to have been taken at almost fixed airmass the entire time. We see substantial variation in sky brightness in both bands that correlates with field number (i.e., time) in the same way for both. This indicates sky emission coming from scattered light that varies with time.
```
fields = retrieve_camcol(run=4905)
plot_sky(fields)
```
But we can also consider a run with substantial variation in airmass. There is often a clear correlation of sky brightness with airmass: as you get further from zenith, you expect the brightness to increase. Note that this does not happen with exactly the factor you would expect (sky flux linear in airmass), because the sky is not uniform and also varies in time, even in the $g$ band. In the $i$ band, the OH emission is even more variable in space and time, so you see extra variation there. (A sketch of the naive linear scaling follows the plots below.)
```
fields = retrieve_camcol(run=4822)
plot_sky(fields)
```
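For comparison with the plots above, here is what the naive scaling would predict. This is a small illustrative cell under the assumption that the sky flux were exactly proportional to airmass, which, as discussed, it is not.
```
# If the sky flux scaled linearly with airmass X, the surface brightness in
# mag/arcsec^2 would brighten by 2.5 * log10(X) relative to zenith (illustrative cell).
X = np.array([1.0, 1.2, 1.5, 2.0])
for airmass, dmag in zip(X, -2.5 * np.log10(X)):
    print('X = {:.1f}: expected sky offset = {:+.2f} mag/arcsec^2'.format(airmass, dmag))
```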
|
github_jupyter
|
```
%load_ext sql
%config SqlMagic.feedback = False
%matplotlib inline
import os
import pandas as pd
import numpy as np
from mapboxgl.utils import create_color_stops, df_to_geojson
from mapboxgl.viz import CircleViz
# Get env vars from local.env
host = %env DB_HOSTNAME
user = %env DB_USERNAME
password = %env DB_PASSWORD
db = %env DB_NAME
token = %env MAPBOX_API_KEY
# Connection URL to our local MySQL DB
#%sql mysql+mysqldb://{user}:{password}@{host}/{db}?charset=utf8
#subs = %sql SELECT * FROM submissions WHERE from_mlab = 0
#subs = subs.DataFrame()
#subs['latitude'].dropna(inplace=True)
#subs['longitude'].dropna(inplace=True)
#subs.describe()
columns = [
'id', 'testing_for', 'address', 'zip_code', 'provider',
'connected_with', 'monthly_price', 'provider_down_speed',
'provider_price', 'actual_down_speed', 'actual_price', 'rating',
'completed', 'created_at', 'updated_at', 'latitude', 'longitude',
'ping', 'actual_upload_speed', 'test_id', 'ip_address', 'hostname',
'from_mlab', 'area_code', 'test_type', 'census_code',
'upload_median', 'download_median', 'census_status', 'test_date',
'country_code', 'region', 'county', 'accuracy', 'location',
'census_block'
]
# Load and filter the CSV in chunks, keeping only from_mlab = 0 (SUA submissions); loading the whole file at once would need roughly 5 GB of memory
iter_csv = pd.read_csv('./data/9035f7b8-2d2f-4de0-a816-4067e1ae8fd8.csv', header=None, names=columns, iterator=True, chunksize=1000)
sua = pd.concat([chunk[chunk['from_mlab'] == 0] for chunk in iter_csv])
# Cleanup
sua['rating'] = sua['rating'].replace(0.0, np.nan)
#lane = sua[sua['county'] == 41039].copy()
lane = sua.copy()
lane.fillna(value=np.nan, inplace=True)
#lane = lane[lane.accuracy.notnull() & lane.latitude.notnull() & lane.longitude.notnull()]
lane = lane[lane.latitude.notnull() & lane.longitude.notnull()]
lane.describe()
# Create a geojson file export from a Pandas dataframe
#df_to_geojson(lane, filename='points.geojson', properties=['accuracy'], lat='latitude', lon='longitude', precision=3)
df_to_geojson(lane, filename='points.geojson', properties=[], lat='latitude', lon='longitude', precision=3)
# Generate data breaks and color stops from colorBrewer
color_breaks = [0, 10, 25, 50, 100, 1000]
color_stops = create_color_stops(color_breaks, colors='YlGnBu')
# Create the viz from the dataframe
viz = CircleViz('points.geojson',
access_token=token,
height='800px',
# color_property = "accuracy",
color_stops = color_stops,
center = (-123.09, 44.082),
zoom = 8,
below_layer = 'waterway-label'
)
viz.show()
```
|
github_jupyter
|
```
import torch
import torch.nn as nn
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
import mimic
import mimic.utils.plot  # create_fig is called below via mimic.utils.plot, so load the submodule explicitly
from mimic.utils import text as text
from mimic.utils.experiment import MimicExperiment
from mimic.utils.filehandling import get_config_path
from mimic.utils.flags import parser
from mimic.utils.flags import update_flags_with_config
from dataclasses import dataclass
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
FLAGS = parser.parse_args([])
config_path = get_config_path(FLAGS)
flags = update_flags_with_config(config_path, testing=True)
flags.modality = 'PA'
flags.img_size = 128
flags.text_encoding = 'word'
flags.feature_extractor_img = 'resnet'
flags.batch_size = 1
flags.dataloader_workers = 0
flags.device = device
flags.normalization = False
flags.len_sequence = 128
flags.str_experiment = 'something'
flags.alpha_modalities = [flags.div_weight_uniform_content, flags.div_weight_m1_content,
flags.div_weight_m2_content, flags.div_weight_m3_content]
flags.use_clf = False
flags.dir_gen_eval_fid = 'fdgb'
exp = MimicExperiment(flags)
exp.plot_img_size = torch.Size([1, flags.img_size, flags.img_size])
exp.modalities['text'].plot_img_size = torch.Size([1, flags.img_size+128, flags.img_size])
mods = exp.modalities
trainloader = DataLoader(exp.dataset_train, batch_size=flags.batch_size,
shuffle=False,
num_workers=flags.dataloader_workers, pin_memory=False)
nbr_samples = 5
def get_datas(which_label:int):
datas = {'PA': [], 'Lateral': [], 'text': []}
texts = []
labels_list = []
for data, labels in trainloader:
if labels[0][which_label].item() == 1:
for mod_key, mod in mods.items():
datas[mod_key].append(mod.plot_data(exp, data[mod_key].squeeze(0)))
if mod_key == 'text':
texts.append(data[mod_key])
labels_list.append(labels[0].tolist())
if len(datas[mod_key]) == nbr_samples:
return datas, texts, labels_list
datas, texts, labels_list = get_datas(2)
rec = torch.Tensor()
for mod in mods:
for idx in range(nbr_samples):
if mod == 'text':
img = datas[f'{mod}'][idx].cpu().unsqueeze(0)
else:
img = datas[f'{mod}'][idx].cpu()
# pad the non text modalities such that they fit in a wider rectangle.
m = nn.ZeroPad2d((64, 64, 0, 0))
img = m(img.squeeze()).unsqueeze(0).unsqueeze(0)
rec = torch.cat((rec, img), 0)
fig = mimic.utils.plot.create_fig(f'something.png',
img_data=rec,
num_img_row=nbr_samples, save_figure=False)
plt.imshow(fig)
plt.show()
plt.close()
for i in range(nbr_samples):
text_sample = text.tensor_to_text(exp, texts[i], one_hot=False)[0]
text_sample = [word for word in text_sample if word != '<pad>']
print(' '.join(text_sample).replace('.', '.\n'))
print(labels_list[i])
plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(datas['PA'][i].squeeze())
plt.subplot(1, 2, 2)
plt.imshow(datas['Lateral'][i].squeeze())
plt.show()
plt.close()
```
# Lung Opacity
```
datas, texts, labels_list = get_datas(0)
for i in range(nbr_samples):
text_sample = text.tensor_to_text(exp, texts[i], one_hot=False)[0]
text_sample = [word for word in text_sample if word != '<pad>']
print(' '.join(text_sample).replace('.', '.\n'))
print(labels_list[i])
plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(datas['PA'][i].squeeze())
plt.subplot(1, 2, 2)
plt.imshow(datas['Lateral'][i].squeeze())
plt.show()
plt.close()
```
# Pleural Effusion
```
datas, texts, labels_list = get_datas(1)
for i in range(nbr_samples):
text_sample = text.tensor_to_text(exp, texts[i], one_hot=False)[0]
text_sample = [word for word in text_sample if word != '<pad>']
print(' '.join(text_sample).replace('.', '.\n'))
print(labels_list[i])
plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(datas['PA'][i].squeeze())
plt.subplot(1, 2, 2)
plt.imshow(datas['Lateral'][i].squeeze())
plt.show()
plt.close()
```
# Experiments with the torch.WeightedRandomSampler
## Without sampling
```
trainloader = DataLoader(exp.dataset_train, batch_size=50,
shuffle=False,
num_workers=flags.dataloader_workers, pin_memory=False)
all_labels = torch.Tensor()
for _, labels in trainloader:
all_labels = torch.cat((all_labels, labels.cpu()), 0)
print(all_labels.shape)
{label: all_labels[:, i].sum() for i, label in enumerate(['Lung Opacity', 'Pleural Effusion', 'Support Devices'])}
```
## With sampling
```
def calculateWeights(label_dict, d_set):
arr = []
for label, count in label_dict.items():
weight = count / len(d_set)
arr.append(weight)
return arr
labels_df = exp.dataset_train.labels
counts = labels_df[labels_df == 1].count()
print(counts)
weights = calculateWeights(counts, exp.dataset_train)
weights = torch.DoubleTensor(weights)
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(exp.dataset_train), replacement=True)
trainloader = DataLoader(exp.dataset_train, batch_size=50, sampler=sampler,
shuffle=False,
num_workers=flags.dataloader_workers, pin_memory=False)
all_labels = torch.Tensor()
for _, labels in trainloader:
all_labels = torch.cat((all_labels, labels.cpu()), 0)
print(all_labels.shape)
{label: all_labels[:, i].sum() for i, label in enumerate(['Lung Opacity', 'Pleural Effusion', 'Support Devices'])}
```
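Note that `WeightedRandomSampler` draws indices from `range(len(weights))`, i.e. it expects one weight per *sample*, while `calculateWeights` above produces one weight per label (and weights proportional to, rather than inversely proportional to, label frequency). Below is a minimal per-sample sketch, not part of the original experiment; it assumes `exp.dataset_train.labels` is the same binary label DataFrame used above, and the variable names are illustrative only.
```
# Sketch (not from the original experiment): one sampling weight per training sample,
# using inverse label frequency so that rare labels are drawn more often.
import numpy as np

labels_np = np.nan_to_num(exp.dataset_train.labels.values)   # (n_samples, n_labels), 0/1
label_freq = labels_np.mean(axis=0)                          # fraction of positives per label
per_label_w = 1.0 / np.clip(label_freq, 1e-8, None)          # inverse frequency per label
sample_weights = (labels_np * per_label_w).max(axis=1)       # weight assigned to each sample
sample_weights[sample_weights == 0] = 1.0                    # samples with no positive label

balanced_sampler = torch.utils.data.WeightedRandomSampler(
    weights=torch.as_tensor(sample_weights, dtype=torch.double),
    num_samples=len(exp.dataset_train),
    replacement=True)
balanced_loader = DataLoader(exp.dataset_train, batch_size=50, sampler=balanced_sampler,
                             num_workers=flags.dataloader_workers, pin_memory=False)
```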
<a href="https://colab.research.google.com/github/PUC-RecSys-Class/RecSysPUC-2020/blob/master/practicos/pyRecLab_uKNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# pyreclab UserKNN Tutorial
In this practical session we will use the Python library [pyreclab](https://github.com/gasevi/pyreclab), developed by the IALab and SocVis laboratories of the Pontificia Universidad Católica de Chile, to learn about User KNN (user-based collaborative filtering).
**Instructor**: Denis Parra
**Teaching assistants**: Andrés Villa, Francisca Cattan, Vladimir Araujo, Andrés Carvallo, Manuel Cartagena.
# Table of Contents
>[pyreclab UserKNN Tutorial](#scrollTo=NC-ceGb8LRLT)
>[Table of Contents](#scrollTo=2j02H66f87eV)
>[Downloading the data](#scrollTo=9qKTQ2V5VKSX)
>[Inspecting the downloaded files](#scrollTo=liJ3L2kl4g1o)
>>[How to view the information of one or more movies](#scrollTo=jqXxj773Foo8)
>>[Setting up the environment](#scrollTo=P3K5VPZoGSYX)
>>[UserKNN](#scrollTo=2e0ZeZXzMhU3)
>>[Predicting the rating a user will give to a given item](#scrollTo=lEx2TVc1B5wk)
>>[Generating a list of recommendations for a user](#scrollTo=dIVXNsQ8CCJV)
>>[Exploring the hyperparameters of the UserKnn model](#scrollTo=6_Kp3zRwJ9gv)
>>>[Evaluating prediction error (RMSE and MAE)](#scrollTo=g55SDLmqCpUo)
# Downloading the data
Run each cell by pressing the **Play** button or pressing Ctrl+Enter (Linux and Windows) or Command+Enter (OSX) to download the datasets.
```
!curl -L -o "u1.base" "https://drive.google.com/uc?export=download&id=1bGweNw7NbOHoJz11v6ld7ymLR8MLvBsA"
!curl -L -o "u1.test" "https://drive.google.com/uc?export=download&id=1f_HwJWC_1HFzgAjKAWKwkuxgjkhkXrVg"
!curl -L -o "u.item" "https://drive.google.com/uc?export=download&id=10YLhxkO2-M_flQtyo9OYV4nT9IvSESuz"
```
The files **u1.base** and **u1.test** contain {user, item, rating, timestamp} tuples, i.e. user preference information about movies from a sample of the [movielens](https://grouplens.org/datasets/movielens/) dataset.
# Inspecting the downloaded files
Let's take a look at one of these files:
```
import pandas as pd
train_file = pd.read_csv('u1.base', sep='\t', names = ['userid', 'itemid', 'rating', 'timestamp'], header=None)
train_file.head()
# Show the row(s) for the item with id = 1653
train_file[train_file['itemid'] == 1653]
```
On the other hand, to get additional information about each movie such as *title*, *release date*, *genre*, etc., we will load the downloaded item file (*u.item*) so we can map each item identifier to the data that describes it.
Let's inspect the contents of this file:
```
info_cols = [ 'movieid', 'title', 'release_date', 'video_release_date', 'IMDb_URL', \
'unknown', 'Action', 'Adventure', 'Animation', 'Children', 'Comedy', \
'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', \
'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western' ]
pd.options.display.max_columns = None
info_file = pd.read_csv('u.item', sep='|', index_col = 0, names = info_cols, header=None, encoding='latin-1')
info_file.head()
```
The setting `pd.options.display.max_columns = None` lets the table be displayed in full horizontally, i.e. it shows all of its columns.
```
info_file.info()
info_file.describe()
```
The ***pandas*** functions `info` and `describe` are a good way to get a rough overview of a dataset.
## How to view the information of one or more movies
This takes two steps (plus an optional third):
1. Determine the indices of the movies
2. Retrieve those rows from `info_file`
3. (optional) Select a specific column
```
# Example of how to display movie titles from their IDs
# Step 1
pelis = [5,4,1]
# Step 2
info_file.loc[pelis]
```
In this code:
1. `pelis = [5,4,1]` indicates that we want the movies whose indices are 5, 4 and 1
2. `info_file.loc[pelis]` uses the `loc` accessor to retrieve those rows
Next we will see how to get a specific column. To do this you only need to add square brackets with the column name.
```
# Step 3 (optional)
info_file.loc[pelis]['title']
```
## Setting up the environment
First we need to install a library for making recommendations. It is called [***pyreclab***](https://github.com/gasevi/pyreclab).
```
!pip install pyreclab
```
Then we need to import pyreclab and numpy, the libraries we will use in this session.
```
import pyreclab
import numpy as np
```
## UserKNN
Let's try the recommendation method based on ***UserKNN***.
```
# Declare the UserKnn recommender object
myUserKnn = pyreclab.UserKnn(dataset='u1.base',
dlmchar=b'\t',
header=False,
usercol=0,
itemcol=1,
ratingcol=2)
```
You can see that we are passing the recommender object:
* ***dataset***: the path to the dataset, in this case in the same folder as this notebook
* ***dlmchar***: the file delimiter, in this case the columns are separated by a tab (\t)
* ***header***: whether the dataset has a header row (for example, the column names)
* ***usercol***: the index of the column that corresponds to users
* ***itemcol***: the index of the column that corresponds to items
* ***ratingcol***: the index of the column that corresponds to ratings
```
# Train the model on the existing data
# Remember that in this method you can try out the parameter k (number of neighbors)
# as well as the similarity metric (pearson, cosine)
myUserKnn.train(k=7, similarity='pearson')
```
## Predicting the rating a user will give to a given item
According to the UserKnn recommendation model, what rating will user 457 give to item 37?
```
myUserKnn.predict("457", "37")
```
## Generating a list of recommendations for a user
Generate a list of 5 recommendations for the user with ID 457 and show which movies they are (remember the `loc` accessor seen earlier).
```
reclist_userKnn = myUserKnn.recommend("457", 5)
pelis_userKnn = np.array(reclist_userKnn).astype(int)
info_file.loc[pelis_userKnn]['title']
```
## Exploring the hyperparameters of the UserKnn model
One of the parameters we will explore is the number of neighbors (users) used; to do this we compute the model's error for several neighbor counts and observe how the error behaves.
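As a reference (these are the standard definitions, independent of pyreclab's internals), the two metrics reported by `test()` over the $N$ test ratings are

$$\mathrm{MAE} = \frac{1}{N}\sum_{u,i} \left| \hat{r}_{ui} - r_{ui} \right|, \qquad \mathrm{RMSE} = \sqrt{\frac{1}{N}\sum_{u,i} \left( \hat{r}_{ui} - r_{ui} \right)^{2}},$$

where $r_{ui}$ is the observed rating and $\hat{r}_{ui}$ the predicted one; RMSE penalizes large errors more heavily than MAE.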
```
k_values = [5, 10, 30, 50] # define different numbers of neighbors (k)
mae_values = [] # to store MAE results
rmse_values = [] # to store RMSE values
for k in k_values:
myUserKnn.train(k,'cosine')
predlist, mae, rmse = myUserKnn.test( input_file = 'u1.test',dlmchar = b'\t', header = False, usercol = 0, itemcol = 1, ratingcol = 2, output_file = 'predictions.csv' )
rmse_values.append(rmse)
mae_values.append(mae)
print('RMSE:', rmse_values)
print('MAE:', mae_values)
```
### Evaluating prediction error (RMSE and MAE)
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(k_values, rmse_values, 'r-')
plt.plot(k_values, rmse_values, 'bo')
plt.show()
plt.plot(k_values, mae_values, 'y-')
plt.plot(k_values, mae_values, 'go')
plt.show()
```
Based on the results: which number of neighbors gives recommendations with the lowest prediction error?
We can see that the lowest error occurs at **k=30**, so we can retrain with this value and look at the results.
```
myUserKnn.train(k=30, similarity='cosine')
reclist_userKnn = myUserKnn.recommend("457", 5)
pelis_userKnn = np.array(reclist_userKnn).astype(int)
info_file.loc[pelis_userKnn]['title']
```
```
#pip install selenium
import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
stock_dictionary = {
"BG" : ["Bunge Limited", "Agrar"],
"ALSN" : ["Allison Transmission", "Automobile"],
"AN" : ["Autonation", "Automobile"],
"BMW" : ["BMW", "Automobile"],
"DAI" : ["Daimler", "Automobile"],
"GE" : ["General Electric", "Automobile"],
"TSLA" : ["Tesla", "Automobile"],
"DB" : ["Deutsche Bank", "Banking"],
"CBK" : ["Commerzbank", "Banking"],
"ABBV" : ["Abbvie Inc.", "Biotech"],
"ACAD" : ["Acadia Pharmaceuticals Inc.", "Biotech"],
"AMGN" : ["Amgen Inc.", "Biotech"],
"BIIB" : ["Biogen Inc.", "Biotech"],
"CRSP" : ["Crispr Therapeutics AG", "Biotech"],
"10E" : ["Aphria Inc.", "Cannabis"],
"AGEEF": ["Halo Labs Inc.", "Cannabis"],
"CGC" : ["Canopy Growth", "Cannabis"],
"CRON" : ["Cronos Group", "Cannabis"],
"TLRY" : ["Tilray Inc.", "Cannabis"],
"ACB" : ["Aurora Cannabis", "Cannabis"],
"CLDR" : ["Cloudera Inc.", "Cloud"],
"CRM" : ["Salesforce", "Cloud"],
"DOCU" : ["DocuSign", "Cloud"],
"NOW" : ["Service Now", "Cloud"],
"APX" : ["Appen Limited", "Datenverarbeitung"],
"D6H" : ["Datagroup", "Datenverarbeitung"],
"D" : ["Dominion Energy", "Energy"],
"386" : ["Sinopec", "Energy"],
"NEE" : ["NextEra Energy", "Energy"],
"R6C" : ["Royal Dutch Shell", "Energy"],
"DIS" : ["Walt Disney", "Entertainment"],
"AAPL" : ["Apple Inc.", "FANG"],
"AMZN" : ["Amazon Com Inc.", "FANG"],
"FB" : ["Facebook Inc.", "FANG"],
"GOOGL": ["Alphabet Inc.", "FANG"],
"NFLX" : ["Netflix Inc.", "FANG"],
"PYPL" : ["PayPal Inc.", "Fintech"],
"XAUMF": ["Goldmoney Inc.", "Fintech"],
"SQ" : ["Square Inc.", "Fintech"],
"BC" : ["Brunswick Corp.", "Tourism"],
"RCL" : ["Royal Caribbean Group", "Tourism"],
"EA" : ["Electronic Arts Inc.", "Gaming"],
"GME" : ["Gamestop", "Gaming"],
"TCEHY" : ["Tencent Holding LTD", "Gaming"],
"CDW" : ["CDW Corp.", "Trade"],
"AGNC" : ["AGNC Investment Corp.", "Real Estate"],
"CBRE" : ["CBR Group Inc.", "Real Estate"],
"MHK" : ["Mohawk Industries", "Real Estate"],
"EPR" : ["EPR Properties", "Real Estate"],
"PLD" : ["Prologis Inc.", "Real Estate"],
"CFX" : ["Colfax Corporation", "Industry"],
"DOV" : ["Dover Corp.", "Industry"],
"MT" : ["Arcelormittal SA.", "Industry"],
"CLF" : ["Cleveland Cliffs Inc.", "Industry"],
"SLCA" : ["Silicia Holdings Inc.", "Industry"],
"BAK" : ["Braskem", "Industry"],
"TKA" : ["Thyssenkrupp AG", "Industry"],
"SLB" : ["Schlumberger Limited", "Industry"],
"MMM" : ["3M Company", "Industry"],
"9984" : ["Softbank Group", "Investors"],
"BRK.B": ["Berkshire Hathaway Inc.", "Investors"],
"LMT" : ["Lockheed Martin Corp.", "Nuclear Fusion"],
"BATS" : ["British American Tobacco", "Consumer Products"],
"JNJ" : ["Johnson & Johnson", "Consumer Products"],
"NESN" : ["Nestlé", "Consumer Products"],
"PG" : ["Procter & Gamble", "Consumer Products"],
"BTCUSD": ["Bitcoin", "Crypto"],
"ETHUSD": ["Ethereum", "Crypto"],
"IOTUSD": ["Iota", "Crypto"],
"LINKUSDT": ["Chainlink", "Crypto"],
"BYND" : ["Beyond Meat", "Consumer Brands"],
"LVMH" : ["Louis Vuitton", "Consumer Brands"],
"TXN" : ["Texas Instruments", "Consumer Brands"],
"XIACF": ["Xiaomi Corp.", "Consumer Brands"],
"ADS" : ["Adidas AG", "Consumer Brands"],
"CMG" : ["Chipotle Mexican Grill", "Consumer Brands"],
"ISRG" : ["Intuitive Surgical Inc.", "Medi-Tech"],
"ACIA" : ["Acacia Communications Inc.", "Network"],
"BR" : ["Broadridge Financial Solutions", "Network"],
"COMM" : ["Commscope Holdings", "Network"],
"M0Y" : ["Mynaric AG", "Network"],
"BBY" : ["Best Buy", "E-Commerce"],
"BABA" : ["Alibaba Group", "E-Commerce"],
"OCDO" : ["Ocado Group PLC", "E-Commerce"],
"SHOP" : ["Shopify Inc.", "E-Commerce"],
"FVRR" : ["Fiverr LTD", "E-Commerce"],
"ABC" : ["Amerisourcebergen Corp.", "Pharma"],
"ABT" : ["Abbott Laboratories", "Pharma"],
"BAS" : ["BASF", "Pharma"],
"BAYN" : ["Bayer AG", "Pharma"],
"CVS" : ["CVS Healthcare", "Pharma"],
"DVA" : ["Davita Inc.", "Pharma"],
"MCK" : ["Mckesson Corp.", "Pharma"],
"NOVO_B": ["Novo Nordisk", "Pharma"],
"OHI" : ["Omega Healthcare", "Pharma"],
"SPCE" : ["Virgin Galactic Holdings", "Space"],
"UFO" : ["Procure ETF", "Space"],
"AA" : ["Alcoa Corporation", "Raw Materials"],
"MPC" : ["Marathon Petroleum", "Raw Materials"],
"XAUUSD": ["Gold", "Raw Materials"],
"EMPR" : ["Empire Petroleum", "Raw Materials"],
"USOIL": ["WTI Crude Oil", "Raw Materials"],
"LAC" : ["Lithium Americas Corp.", "Raw Materials"],
"PALL" : ["Aberdeen Palladium", "Raw Materials"],
"NNIC" : ["Norilsk Nickel", "Raw Materials"],
"GOLD" : ["Barrick Gold", "Raw Materials"],
"KL" : ["Kirkland Lake Gold", "Raw Materials"],
"AMAT" : ["Applied Materials Inc.", "Tech/ Chips"],
"AMD" : ["Advanced Micro Devices", "Tech/ Chips"],
"AVGO" : ["Broadcom Inc.", "Tech/ Chips"],
"BIDU" : ["Baidu Inc.", "Tech/ Chips"],
"CDNS" : ["Cadence Design Systems", "Tech/ Chips"],
"IFX" : ["Infineon Tech AG", "Tech/ Chips"],
"INTC" : ["Intel Corp.", "Tech/ Chips"],
"NVDA" : ["Nvidia Corp.", "Tech/ Chips"],
"ORCL" : ["Oracle Corp.", "Tech/ Chips"],
"QCOM" : ["Qualcomm Inc.", "Tech/ Chips"],
"SNAP" : ["Snap Inc.", "Tech/ Chips"],
"WB" : ["Weibo Corp.", "Tech/ Chips"],
"WDC" : ["Western Digital Corp.", "Tech/ Chips"],
"MSFT" : ["Microsoft Corp.", "Tech/ Chips"],
"ASML" : ["ASML Holding", "Tech/ Chips"],
"TSM" : ["Taiwan Semiconductor", "Tech/ Chips"],
"2318" : ["Ping An", "Insurance"],
"ACGL" : ["Arch Capital Group", "Insurance"],
"AON" : ["Aon PLC", "Insurance"],
"D7G" : ["Nel Asa", "Hydrogen"],
"BLDP" : ["Ballard Power", "Hydrogen"],
"27W" : ["Powercell Sweden", "Hydrogen"],
"LIN" : ["Linde PLC", "Hydrogen"]}
username = ""
password = ""
options = webdriver.ChromeOptions()
prefs = {'download.default_directory' : '/Users/admin/Desktop/spearmint-vector-student-code/Final_Project/daily_stock_data'}
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(options=options)
driver.get("https://www.tradingview.com/")
driver.implicitly_wait(10)
signin = driver.find_element_by_class_name("tv-header__link.tv-header__link--signin.js-header__signin")
signin.click()
driver.implicitly_wait(5)
email = driver.find_element_by_class_name("tv-signin-dialog__social.tv-signin-dialog__toggle-email.js-show-email")
email.click()
driver.implicitly_wait(4)
user = driver.find_element_by_name("username")
user.send_keys(username)
driver.implicitly_wait(2)
password_field = driver.find_element_by_name("password")
password_field.send_keys(password)  # send the credential defined above instead of overwriting the variable
driver.implicitly_wait(3)
signin2 = driver.find_element_by_class_name("tv-button.tv-button--size_large.tv-button--primary.tv-button--loader")
signin2.click()
time.sleep(5)
element = WebDriverWait(driver, 3).until(
EC.presence_of_element_located((By.CLASS_NAME, "tv-mainmenu__item.tv-mainmenu__item--chart")))
element.click()
time.sleep(10)
for key in stock_dictionary:
search = driver.find_element_by_class_name("button-2ioYhFEY.button-o2NxAp-P.apply-common-tooltip.isInteractive-20uLObIc")
search.click()
driver.implicitly_wait(3)
search2 = driver.find_element_by_class_name("search-2XsDfq16.upperCase-UYMmoP0p.input-2pz7DtzH")
search2.clear()
search2.send_keys(key)
search2.send_keys(Keys.RETURN)
driver.implicitly_wait(5)
menu = driver.find_element_by_class_name("button-9U4gleap.button-2ioYhFEY.apply-common-tooltip.isInteractive-20uLObIc")
menu.click()
driver.implicitly_wait(3)
export = driver.find_element_by_class_name("apply-common-tooltip.common-tooltip-vertical.item-2xPVYue0.item-1dXqixrD")
export.click()
driver.implicitly_wait(3)
chart = driver.find_element_by_xpath("(//*[@class='selected-2IjEMdXr'])[1]")
chart.click()
driver.implicitly_wait(2)
daily = driver.find_element_by_xpath("(//*[@class='label-3Xqxy756'])[2]")
daily.click()
driver.implicitly_wait(2)
t_format = driver.find_element_by_xpath("(//*[@class='selected-2IjEMdXr'])[2]")
t_format.click()
driver.implicitly_wait(3)
iso = driver.find_element_by_xpath("(//*[@class='labelRow-3Q0rdE8-'])[1]")
iso.click()
driver.implicitly_wait(3)
download = driver.find_element_by_name("submit")
download.click()
driver.quit()
```
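A note on the locator API used above: the `find_element_by_*` helpers were deprecated in Selenium 4 and removed in later 4.x releases, so depending on the installed version the equivalent `By`-based calls may be needed instead. A short sketch with two of the locators from above (the CSS class names are site-generated and may have changed since this was written):
```
# Selenium 4 style lookups; By is already imported at the top of this notebook.
signin = driver.find_element(By.CLASS_NAME, "tv-header__link.tv-header__link--signin.js-header__signin")
user = driver.find_element(By.NAME, "username")
```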
### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
school_data_to_load = "schools_complete.csv"
student_data_to_load = "students_complete.csv"
# Read School and Student Data File and store into Pandas Data Frames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset
school_data_complete = pd.merge(student_data, school_data, how="left", on="school_name")
school_data_complete.head()
```
## District Summary
* Calculate the total number of schools
* Calculate the total number of students
* Calculate the total budget
* Calculate the average math score
* Calculate the average reading score
* Calculate the overall passing rate, i.e. the average of the % passing math and % passing reading
* Calculate the percentage of students with a passing math score (70 or greater)
* Calculate the percentage of students with a passing reading score (70 or greater)
* Create a dataframe to hold the above results
* Optional: give the displayed data cleaner formatting
```
tot_schools = school_data["school_name"].count()
tot_students = student_data["student_name"].count()
tot_budget = school_data["budget"].sum()
avg_math = student_data["math_score"].mean()
avg_reading = student_data["reading_score"].mean()
pass_math = student_data.loc[student_data["math_score"] >= 70]
pass_math_count = pass_math["student_name"].count()
pass_reading = student_data.loc[student_data["reading_score"] >= 70]
pass_reading_count = pass_reading["student_name"].count()
overall_pass = ((pass_math_count + pass_reading_count)/(2*tot_students))
summary_table = pd.DataFrame({"Total Schools": tot_schools,
"Total Students": [tot_students],
"Total Budget": [tot_budget],
"Average Math Score": [avg_math],
"Average Reading Score": [avg_reading],
"% Passing Math": [(pass_math_count/tot_students)*100],
"% Passing Reading": [(pass_reading_count/tot_students)*100],
"%Overall Passing Rate": [overall_pass*100] })
summary_table["Total Students"] = summary_table["Total Students"].astype(float).map("{:,.0f}".format)
summary_table["Total Budget"] = summary_table["Total Budget"].astype(float).map("${:,.2f}".format)
summary_table
```
## School Summary
* Create an overview table that summarizes key metrics about each school, including:
* School Name
* School Type
* Total Students
* Total School Budget
* Per Student Budget
* Average Math Score
* Average Reading Score
* % Passing Math
* % Passing Reading
* Overall Passing Rate (Average of the above two)
* Create a dataframe to hold the above results
## Top Performing Schools (By Passing Rate)
* Sort and display the top five schools in overall passing rate
```
group_school = school_data_complete.groupby("school_name")
#school_type = group_school["type"].unique()
students = group_school["student_name"].count()
school_budget = group_school["budget"].mean()
school_type = [s_type[0] for s_type in group_school["type"].unique()]
pass_math_school = school_data_complete.loc[school_data_complete["math_score"] >= 70]
math_group = pass_math_school.groupby("school_name")
pass_reading_school = school_data_complete.loc[school_data_complete["reading_score"] >= 70]
reading_group = pass_reading_school.groupby("school_name")
school_summary_table = pd.DataFrame({"School Type": school_type,
"Total Students": students,
"Total School Budget": school_budget,
"Per Student Budget": (group_school["budget"].mean())/(group_school["student_name"].count()),
"Average Math Score": (group_school["math_score"].mean()),
"Average Reading Score": (group_school["reading_score"].mean()),
"% Passing Math": (math_group["student_name"].count())/(group_school["student_name"].count())*100,
"% Passing Reading": (reading_group["student_name"].count())/(group_school["student_name"].count())*100,
"% Overall Passing Rate": (reading_group["student_name"].count()+math_group["student_name"].count())/(2*group_school["student_name"].count())*100
})
school_summary_table["Per Student Budget"] = school_summary_table["Per Student Budget"].astype(float).map("${:,.2f}".format)
school_summary_table["Total School Budget"] = school_summary_table["Total School Budget"].astype(float).map("${:,.2f}".format)
best_schools = school_summary_table.sort_values(["% Overall Passing Rate"], ascending=False)
best_schools.reset_index(inplace=False)
best_schools.index.name = None
best_schools.head()
```
## Bottom Performing Schools (By Passing Rate)
* Sort and display the five worst-performing schools
```
worst_schools = school_summary_table.sort_values(["% Overall Passing Rate"], ascending=True)
worst_schools.reset_index(inplace=False)
worst_schools.index.name = None
worst_schools.head()
```
## Math Scores by Grade
* Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
* Create a pandas series for each grade. Hint: use a conditional statement.
* Group each series by school
* Combine the series into a dataframe
* Optional: give the displayed data cleaner formatting
```
freshmen = school_data_complete.loc[school_data_complete["grade"] == "9th"]
frosh_group = freshmen.groupby("school_name")
frosh_avg = frosh_group.mean()
sophomore = school_data_complete.loc[school_data_complete["grade"] == "10th"]
soph_group = sophomore.groupby("school_name")
soph_avg = soph_group.mean()
juniors = school_data_complete.loc[school_data_complete["grade"] == "11th"]
jrs_group = juniors.groupby("school_name")
jrs_avg = jrs_group.mean()
seniors = school_data_complete.loc[school_data_complete["grade"] == "12th"]
srs_group = seniors.groupby("school_name")
srs_avg = srs_group.mean()
grade_summary_under = pd.merge(frosh_avg, soph_avg, on="school_name", suffixes=("_fr", "_soph"))
grade_summary_upper = pd.merge(jrs_avg, srs_avg, on="school_name", suffixes=("_jrs", "_srs"))
grade_summary_total = pd.merge(grade_summary_under, grade_summary_upper, on="school_name")
just_math = grade_summary_total[["math_score_fr", "math_score_soph", "math_score_jrs", "math_score_srs"]]
renamed_math = just_math.rename(columns={"math_score_fr": "9th", "math_score_soph": "10th", "math_score_jrs": "11th", "math_score_srs": "12th"})
renamed_math.index.name = None
renamed_math
```
## Reading Score by Grade
* Perform the same operations as above for reading scores
```
just_reading = grade_summary_total[["reading_score_fr", "reading_score_soph", "reading_score_jrs", "reading_score_srs"]]
renamed_reading = just_reading.rename(columns={"reading_score_fr": "9th", "reading_score_soph": "10th", "reading_score_jrs": "11th", "reading_score_srs": "12th"})
renamed_reading.index.name = None
renamed_reading
```
## Scores by School Spending
* Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
* Average Math Score
* Average Reading Score
* % Passing Math
* % Passing Reading
* Overall Passing Rate (Average of the above two)
```
# Sample bins. Feel free to create your own bins.
spending_bins = [0, 585, 615, 645, 675]
group_names = ["<$585", "$585-615", "$615-645", "$645-675"]
summary_table_bins = pd.DataFrame({
"Per Student Budget": (group_school["budget"].mean())/(group_school["student_name"].count()),
"Average Math Score": (group_school["math_score"].mean()),
"Average Reading Score": (group_school["reading_score"].mean()),
"% Passing Math": (math_group["student_name"].count())/(group_school["student_name"].count())*100,
"% Passing Reading": (reading_group["student_name"].count())/(group_school["student_name"].count())*100,
"% Overall Passing Rate": (reading_group["student_name"].count()+math_group["student_name"].count())/(2*group_school["student_name"].count())*100
})
summary_table_bins["Spending Ranges(Per Student)"] = pd.cut(summary_table_bins["Per Student Budget"], spending_bins, labels=group_names)
budget_group = summary_table_bins.groupby("Spending Ranges(Per Student)").mean()
no_psb = budget_group[["Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]]
no_psb.head()
```
## Scores by School Size
* Perform the same operations as above, based on school size.
```
# Sample bins. Feel free to create your own bins.
size_bins = [0, 1000, 2000, 5000]
group_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
size_summary_table = pd.DataFrame({
"Total Students": group_school["student_name"].count(),
"Average Math Score": (group_school["math_score"].mean()),
"Average Reading Score": (group_school["reading_score"].mean()),
"% Passing Math": (math_group["student_name"].count())/(group_school["student_name"].count())*100,
"% Passing Reading": (reading_group["student_name"].count())/(group_school["student_name"].count())*100,
"% Overall Passing Rate": (reading_group["student_name"].count()+math_group["student_name"].count())/(2*group_school["student_name"].count())*100
})
size_summary_table["School Size"] = pd.cut(size_summary_table["Total Students"], size_bins, labels=group_names)
size_group = size_summary_table.groupby("School Size").mean()
no_ts = size_group[["Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]]
no_ts.head()
```
## Scores by School Type
* Perform the same operations as above, based on school type.
```
#school_data_complete
type_data = school_data_complete.groupby("type")
type_data.head()
students_type = type_data["student_name"].count()
#pass_math_school = school_data_complete.loc[school_data_complete["math_score"] >= 70]
math_type = pass_math_school.groupby("type")
#pass_reading_school = school_data_complete.loc[school_data_complete["reading_score"] >= 70]
reading_type = pass_reading_school.groupby("type")
type_summary_table = pd.DataFrame({
"Average Math Score": (type_data["math_score"].mean()),
"Average Reading Score": (type_data["reading_score"].mean()),
"% Passing Math": (math_type["student_name"].count())/(type_data["student_name"].count())*100,
"% Passing Reading": (reading_type["student_name"].count())/(type_data["student_name"].count())*100,
"% Overall Passing Rate": (reading_type["student_name"].count()+math_type["student_name"].count())/(2*type_data["student_name"].count())*100
})
type_summary_table
writer = pd.ExcelWriter('PyCitySchools.xlsx', engine='xlsxwriter')
school_data_complete.to_excel(writer, sheet_name='Sheet1')
summary_table.to_excel(writer, sheet_name='Sheet2')
best_schools.to_excel(writer, sheet_name='Sheet3')
worst_schools.to_excel(writer, sheet_name='Sheet4')
renamed_math.to_excel(writer, sheet_name='Sheet5')
renamed_reading.to_excel(writer, sheet_name='Sheet6')
no_psb.to_excel(writer, sheet_name='Sheet7')
no_ts.to_excel(writer, sheet_name='Sheet8')
type_summary_table.to_excel(writer, sheet_name='Sheet9')
writer.save()
```
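Depending on the pandas version, `ExcelWriter.save()` is deprecated in favor of `close()`; using the writer as a context manager avoids having to call either explicitly. A short sketch reusing two of the frames written above (the sheet names are illustrative):
```
# Alternative export: the context manager saves and closes the workbook on exit.
with pd.ExcelWriter('PyCitySchools.xlsx', engine='xlsxwriter') as writer:
    summary_table.to_excel(writer, sheet_name='District Summary')
    best_schools.to_excel(writer, sheet_name='Top Schools')
```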
|
github_jupyter
|
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
school_data_to_load = "schools_complete.csv"
student_data_to_load = "students_complete.csv"
# Read School and Student Data File and store into Pandas Data Frames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
school_data_complete.head()
tot_schools = school_data["school_name"].count()
tot_students = student_data["student_name"].count()
tot_budget = school_data["budget"].sum()
avg_math = student_data["math_score"].mean()
avg_reading = student_data["reading_score"].mean()
pass_math = student_data.loc[student_data["math_score"] >= 70]
pass_math_count = pass_math["student_name"].count()
pass_reading = student_data.loc[student_data["reading_score"] >= 70]
pass_reading_count = pass_reading["student_name"].count()
overall_pass = ((pass_math_count + pass_reading_count)/(2*tot_students))
summary_table = pd.DataFrame({"Total Schools": tot_schools,
"Total Students": [tot_students],
"Total Budget": [tot_budget],
"Average Math Score": [avg_math],
"Average Reading Score": [avg_reading],
"% Passing Math": [(pass_math_count/tot_students)*100],
"% Passing Reading": [(pass_reading_count/tot_students)*100],
"%Overall Passing Rate": [overall_pass*100] })
summary_table["Total Students"] = summary_table["Total Students"].astype(float).map("{:,.0f}".format)
summary_table["Total Budget"] = summary_table["Total Budget"].astype(float).map("${:,.2f}".format)
summary_table
group_school = school_data_complete.groupby("school_name")
#school_type = group_school["type"].unique()
students = group_school["student_name"].count()
school_budget = group_school["budget"].mean()
school_type = [s_type[0] for s_type in group_school["type"].unique()]
pass_math_school = school_data_complete.loc[school_data_complete["math_score"] >= 70]
math_group = pass_math_school.groupby("school_name")
pass_reading_school = school_data_complete.loc[school_data_complete["reading_score"] >= 70]
reading_group = pass_reading_school.groupby("school_name")
school_summary_table = pd.DataFrame({"School Type": school_type,
"Total Students": students,
"Total School Budget": school_budget,
"Per Student Budget": (group_school["budget"].mean())/(group_school["student_name"].count()),
"Average Math Score": (group_school["math_score"].mean()),
"Average Reading Score": (group_school["reading_score"].mean()),
"% Passing Math": (math_group["student_name"].count())/(group_school["student_name"].count())*100,
"% Passing Reading": (reading_group["student_name"].count())/(group_school["student_name"].count())*100,
"% Overall Passing Rate": (reading_group["student_name"].count()+math_group["student_name"].count())/(2*group_school["student_name"].count())*100
})
school_summary_table["Per Student Budget"] = school_summary_table["Per Student Budget"].astype(float).map("${:,.2f}".format)
school_summary_table["Total School Budget"] = school_summary_table["Total School Budget"].astype(float).map("${:,.2f}".format)
best_schools = school_summary_table.sort_values(["% Overall Passing Rate"], ascending=False)
best_schools.reset_index(inplace=False)
best_schools.index.name = None
best_schools.head()
worst_schools = school_summary_table.sort_values(["% Overall Passing Rate"], ascending=True)
worst_schools.reset_index(inplace=False)
worst_schools.index.name = None
worst_schools.head()
freshmen = school_data_complete.loc[school_data_complete["grade"] == "9th"]
frosh_group = freshmen.groupby("school_name")
frosh_avg = frosh_group.mean()
sophomore = school_data_complete.loc[school_data_complete["grade"] == "10th"]
soph_group = sophomore.groupby("school_name")
soph_avg = soph_group.mean()
juniors = school_data_complete.loc[school_data_complete["grade"] == "11th"]
jrs_group = juniors.groupby("school_name")
jrs_avg = jrs_group.mean()
seniors = school_data_complete.loc[school_data_complete["grade"] == "12th"]
srs_group = seniors.groupby("school_name")
srs_avg = srs_group.mean()
grade_summary_under = pd.merge(frosh_avg, soph_avg, on="school_name", suffixes=("_fr", "_soph"))
grade_summary_upper = pd.merge(jrs_avg, srs_avg, on="school_name", suffixes=("_jrs", "_srs"))
grade_summary_total = pd.merge(grade_summary_under, grade_summary_upper, on="school_name")
just_math = grade_summary_total[["math_score_fr", "math_score_soph", "math_score_jrs", "math_score_srs"]]
renamed_math = just_math.rename(columns={"math_score_fr": "9th", "math_score_soph": "10th", "math_score_jrs": "11th", "math_score_srs": "12th"})
renamed_math.index.name = None
renamed_math
just_reading = grade_summary_total[["reading_score_fr", "reading_score_soph", "reading_score_jrs", "reading_score_srs"]]
renamed_reading = just_reading.rename(columns={"reading_score_fr": "9th", "reading_score_soph": "10th", "reading_score_jrs": "11th", "reading_score_srs": "12th"})
renamed_reading.index.name = None
renamed_reading
# Sample bins. Feel free to create your own bins.
spending_bins = [0, 585, 615, 645, 675]
group_names = ["<$585", "$585-615", "$615-645", "$645-675"]
summary_table_bins = pd.DataFrame({
"Per Student Budget": (group_school["budget"].mean())/(group_school["student_name"].count()),
"Average Math Score": (group_school["math_score"].mean()),
"Average Reading Score": (group_school["reading_score"].mean()),
"% Passing Math": (math_group["student_name"].count())/(group_school["student_name"].count())*100,
"% Passing Reading": (reading_group["student_name"].count())/(group_school["student_name"].count())*100,
"% Overall Passing Rate": (reading_group["student_name"].count()+math_group["student_name"].count())/(2*group_school["student_name"].count())*100
})
summary_table_bins["Spending Ranges(Per Student)"] = pd.cut(summary_table_bins["Per Student Budget"], spending_bins, labels=group_names)
budget_group = summary_table_bins.groupby("Spending Ranges(Per Student)").mean()
no_psb = budget_group[["Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]]
no_psb.head()
# Sample bins. Feel free to create your own bins.
size_bins = [0, 1000, 2000, 5000]
group_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
size_summary_table = pd.DataFrame({
"Total Students": group_school["student_name"].count(),
"Average Math Score": (group_school["math_score"].mean()),
"Average Reading Score": (group_school["reading_score"].mean()),
"% Passing Math": (math_group["student_name"].count())/(group_school["student_name"].count())*100,
"% Passing Reading": (reading_group["student_name"].count())/(group_school["student_name"].count())*100,
"% Overall Passing Rate": (reading_group["student_name"].count()+math_group["student_name"].count())/(2*group_school["student_name"].count())*100
})
size_summary_table["School Size"] = pd.cut(size_summary_table["Total Students"], size_bins, labels=group_names)
size_group = size_summary_table.groupby("School Size").mean()
no_ts = size_group[["Average Math Score", "Average Reading Score", "% Passing Math", "% Passing Reading", "% Overall Passing Rate"]]
no_ts.head()
#school_data_complete
type_data = school_data_complete.groupby("type")
type_data.head()
students_type = type_data["student_name"].count()
#pass_math_school = school_data_complete.loc[school_data_complete["math_score"] >= 70]
math_type = pass_math_school.groupby("type")
#pass_reading_school = school_data_complete.loc[school_data_complete["reading_score"] >= 70]
reading_type = pass_reading_school.groupby("type")
type_summary_table = pd.DataFrame({
"Average Math Score": (type_data["math_score"].mean()),
"Average Reading Score": (type_data["reading_score"].mean()),
"% Passing Math": (math_type["student_name"].count())/(type_data["student_name"].count())*100,
"% Passing Reading": (reading_type["student_name"].count())/(type_data["student_name"].count())*100,
"% Overall Passing Rate": (reading_type["student_name"].count()+math_type["student_name"].count())/(2*type_data["student_name"].count())*100
})
type_summary_table
writer = pd.ExcelWriter('PyCitySchools.xlsx', engine='xlsxwriter')
school_data_complete.to_excel(writer, sheet_name='Sheet1')
summary_table.to_excel(writer, sheet_name='Sheet2')
best_schools.to_excel(writer, sheet_name='Sheet3')
worst_schools.to_excel(writer, sheet_name='Sheet4')
renamed_math.to_excel(writer, sheet_name='Sheet5')
renamed_reading.to_excel(writer, sheet_name='Sheet6')
no_psb.to_excel(writer, sheet_name='Sheet7')
no_ts.to_excel(writer, sheet_name='Sheet8')
type_summary_table.to_excel(writer, sheet_name='Sheet9')
writer.save()
# Graphics.
```
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import pandas as pd
from numpy import (
array,
column_stack,
dot,
hstack,
logical_and,
mean,
ones,
sqrt,
squeeze,
var,
zeros,
)
from numpy.linalg import lstsq
# Loading and transforming the data
sp500_data = pd.read_csv("sp500.csv", index_col="Date", parse_dates=True)
ftse_data = pd.read_csv("ftse.csv", index_col="Date", parse_dates=True)
sp500 = sp500_data["Adj Close"]
sp500_dates = sp500_data.index
ftse = ftse_data["Adj Close"]
ftse_dates = ftse_data.index
```
## Exercise 1
```
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(sp500_dates, sp500)
fmt = mdates.DateFormatter("%Y")
ax.xaxis.set_major_formatter(fmt)
fig.autofmt_xdate()
plt.draw()
```
## Exercise 2
```
sp500_rets = sp500.resample("W-FRI").last().pct_change().dropna()
ftse_rets = ftse.resample("W-FRI").last().pct_change().dropna()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(sp500_rets)
plt.draw()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(sp500_rets, bins=20)
plt.draw()
```
## Exercise 3
```
bands = [(-1.0, -0.02), (-0.02, 0.0), (0.0, 0.02), (0.02, 1.0)]
percs = zeros(len(bands))
i = 0
for b in bands:
percs[i] = mean(logical_and(sp500_rets > b[0], sp500_rets <= b[1]))
i += 1
fig = plt.figure()
ax = fig.add_subplot(111)
labels = ["<-2%", ">-2% and <0", ">0 and <2%", ">2%"]
ax.pie(percs, labels=labels)
plt.draw()
```
## Exercise 4
```
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(sp500_rets, ftse_rets)
ax.set_xlabel("S&P 500 returns")
ax.set_ylabel("ftse 100 returns")
plt.draw()
```
## Exercise 5
```
x = column_stack((ones(sp500_rets.shape), sp500_rets))
y = column_stack((ftse_rets,))
x, y
out = lstsq(x, y, rcond=None)
b = out[0]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(sp500_rets, ftse_rets)
ax.set_xlabel("S&P 500 returns")
ax.set_ylabel("FTSE 100 returns")
x = ax.get_xlim()
x = array(x)
x.shape = -1, 1
x = hstack((ones(x.shape), x))
fit = dot(x, b)
b = squeeze(b)
ax.plot(x[:, 1], fit, color="#800000")
ax.text(0, -0.15, f"y = {b[0]:0.2f} + {b[1]:0.2f}x")
plt.draw()
```
## Exercise 6
```
T = sp500_rets.size
ewma = zeros((T, 2))
r = column_stack((sp500_rets, ftse_rets))
ewma[0] = var(r, axis=0)
for i in range(1, T):
ewma[i] = 0.97 * ewma[i - 1] + 0.03 * r[i - 1] ** 2
ewma = 100 * sqrt(252 * ewma)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(sp500_rets.index, ewma[:, 0], label="S&P 500 EWMA Vol (Annualized)")
ax.plot(ftse_rets.index, ewma[:, 1], label="FTSE 100 EWMA Vol (Annualized)")
ax.legend(loc=0)
ax.set_title("Annualized Volatility (%)")
fmt = mdates.DateFormatter("%Y")
ax.xaxis.set_major_formatter(fmt)
fig.autofmt_xdate()
plt.show()
```
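For reference, the loop above implements the standard exponentially weighted moving average (EWMA) variance recursion with smoothing factor 0.97,

$$
\sigma_t^2 = 0.97\,\sigma_{t-1}^2 + 0.03\,r_{t-1}^2, \qquad \sigma_0^2 = \widehat{\operatorname{Var}}(r),
$$

and the plotted series is $100\sqrt{252\,\sigma_t^2}$. Note that the factor 252 corresponds to daily data; for the weekly returns used here, 52 would be the matching annualization factor.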
# SELECT within SELECT
```
import os
import pandas as pd
import findspark
os.environ['SPARK_HOME'] = '/opt/spark'
findspark.init()
from pyspark.sql import SparkSession
sc = (SparkSession.builder.appName('app04')
.config('spark.sql.warehouse.dir', 'hdfs://quickstart.cloudera:8020/user/hive/warehouse')
.config('hive.metastore.uris', 'thrift://quickstart.cloudera:9083')
.enableHiveSupport().getOrCreate())
```
## 1. Bigger than Russia
**List each country name where the population is larger than that of 'Russia'.**
```
world(name, continent, area, population, gdp)
```
```
world = sc.read.table('sqlzoo.world')
bm = world.filter(world['name']=='Russia').select('population').collect()[0][0]
world.filter(world['population'] > bm).select('name').toPandas()
```
## 2. Richer than UK
**Show the countries in Europe with a per capita GDP greater than 'United Kingdom'.**
> _Per Capita GDP_
> The per capita GDP is the gdp/population
```
a = world.withColumn('pcgdp', world['gdp']/world['population'])
bm = a.filter(a['name']=='United Kingdom').select('pcgdp').head()[0]
a.filter((a['pcgdp']>bm) & (a['continent']=='Europe')).select('name').toPandas()
```
## 3. Neighbours of Argentina and Australia
List the name and continent of countries in the continents containing either Argentina or Australia. Order by name of the country.
```
bm = [x.continent for x in world.filter(world['name'].isin(
['Argentina', 'Australia'])).select('continent').collect()]
world.filter(world['continent'].isin(bm)).select('name', 'continent').orderBy('name').toPandas()
```
## 4. Between Canada and Poland
Which country has a population that is more than Canada but less than Poland? Show the name and the population.
```
popl_can = world.filter(world['name']=='Canada').select('population').head()[0]
popl_pol = world.filter(world['name']=='Poland').select('population').head()[0]
(world.filter((world['population'] > popl_can) & (world['population'] < popl_pol))
.select('name', 'population').toPandas())
```
## 5. Percentages of Germany
Germany (population 80 million) has the largest population of the countries in Europe. Austria (population 8.5 million) has 11% of the population of Germany.
**Show the name and the population of each country in Europe. Show the population as a percentage of the population of Germany.**
The format should be Name, Percentage for example:
name | percentage
--------|-----------
Albania | 3%
Andorra | 0%
Austria | 11%
... | ...
> _Decimal places_
> You can use the function ROUND to remove the decimal places.
> _Percent symbol %_
> You can use the function CONCAT to add the percentage symbol.
[To get a well rounded view of the important features of SQL you should move on to the next tutorial concerning aggregates.](https://sqlzoo.net/wiki/SUM_and_COUNT)
To gain an absurdly detailed view of one insignificant feature of the language, read on.
We can use the word `ALL` to allow >=, >, < or <= to act over a list. For example, you can find the largest country in the world by population with this query:
```sql
SELECT name
FROM world
WHERE population >= ALL(SELECT population
FROM world
WHERE population>0)
```
You need the condition **population>0** in the sub-query as some countries have **null** for population.
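A possible PySpark counterpart of this `ALL` query, following the same aggregate-then-filter pattern used elsewhere in this notebook (a sketch, not one of the original exercises):
```
# Largest country in the world by population, mirroring the SQL `ALL` example.
# The population > 0 filter plays the role of the sub-query condition.
max_pop = (world.filter(world['population'] > 0)
                .agg({'population': 'max'})
                .head()[0])
world.filter(world['population'] >= max_pop).select('name').toPandas()
```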
```
from pyspark.sql.functions import round, format_string
bm = world.filter(world['name']=='Germany').select('population').head()[0]
(world.filter(world['continent']=='Europe')
.withColumn('pct', format_string('%d%%', round(100*world['population']/bm, 0).cast('int')))
.select('name', 'pct').toPandas())
```
## 6. Bigger than every country in Europe
Which countries have a GDP greater than every country in Europe? [Give the name only.] (Some countries may have NULL gdp values)
We can refer to values in the outer SELECT within the inner SELECT. We can name the tables so that we can tell the difference between the inner and outer versions.
```
bm = world.filter(world['continent']=='Europe').agg({'gdp': 'max'}).head()[0]
world.filter(world['gdp']>bm).select('name').toPandas()
```
## 7. Largest in each continent
**Find the largest country (by area) in each continent, show the continent, the name and the area:**
```sql
SELECT continent, name, population FROM world x
WHERE population >= ALL
(SELECT population FROM world y
WHERE y.continent=x.continent
AND population>0)
```
> __The above example is known as a correlated or synchronized sub-query.__
>
> Using correlated subqueries
> A correlated subquery works like a nested loop: the subquery only has access to rows related to a single record at a time in the outer query. The technique relies on table aliases to identify two different uses of the same table, one in the outer query and the other in the subquery.
>
> One way to interpret the line in the **WHERE** clause that references the two table is _“… where the correlated values are the same”._
>
> In the example provided, you would say _“select the country details from world where the population is greater than or equal to the population of all countries where the continent is the same”._
```
max_area = (world.groupBy('continent')
.agg({'area': 'max'})
.withColumnRenamed('max(area)', 'area'))
(world.join(max_area, ['continent', 'area'], 'inner')
.select('continent', 'name', 'area')
.toPandas())
```
## 8. First country of each continent (alphabetically)
**List each continent and the name of the country that comes first alphabetically.**
```
(world.groupBy('continent')
.agg({'name': 'min'})
.withColumnRenamed('min(name)', 'name')
.toPandas())
```
## 9. Difficult Questions That Utilize Techniques Not Covered In Prior Sections
**Find the continents where all countries have a population <= 25000000. Then find the names of the countries associated with these continents. Show name, continent and population.**
```
bm = [x.continent for x in world.filter(world['population']>2.5e7).select('continent').distinct().collect()]
world.filter(~world['continent'].isin(bm)).select('name', 'continent', 'population').toPandas()
```
## 10.
**Some countries have populations more than three times that of any of their neighbours (in the same continent). Give the countries and continents.**
```
from pyspark.sql.functions import isnull
# how many neighbors are there for each continent
bm = world.groupBy('continent').count()
bm = (bm.withColumn('count', bm['count']-1)
.withColumnRenamed('count', 'nbhrs'))
# compare with triple population
a = world.join((world
.withColumn('popl3', 3*world['population'])
.select('continent', 'name', 'popl3')
.withColumnRenamed('name', 'name_y')),
on='continent', how='outer')
b = (a.filter(a['population'] > a['popl3'])
.select('continent', 'name', 'name_y')
.groupBy([a.continent, a.name])
.count()
.join(bm, on='continent', how='left'))
b.filter(b['count']==b['nbhrs']).select('name', 'continent').toPandas()
sc.stop()
```
### Building a regression model on the bike-sharing dataset
First, load and inspect the dataset.
```
for line in open('data/Readme.txt').readlines():
print line.strip()
rawdata = sc.textFile('data/hour.csv')
data = rawdata.map(lambda x:x.split(','))
for i in data.take(5):
print i
```
The first row is the header. The first column, instant, is just a record index and can be dropped. The second column, dteday, is the date; its information is already contained in the yr, mnth and related variables, so it can be dropped as well. Since we only predict the total count here, casual and registered are removed too. The remaining variables are: season, year, month, hour, holiday, day of week, working day, weather, temperature, feels-like temperature, humidity and wind speed, of which the last four are already normalized.
```
data = data.filter(lambda x: x[0] != 'instant')
data.cache()
print data.count()
```
We will try to fit the dataset with two methods, linear regression and a decision tree. For linear regression the categorical variables are binary (one-hot) encoded (the decision tree does not need this). Build a helper function and transform the data.
```
def binary_encode(rdd, i):
return (rdd.
map(lambda x :x[i])
.distinct()
.zipWithIndex()
.collectAsMap())
# apply the function to columns 2-9 to get the encoding dictionaries
mappings = [binary_encode(data,i) for i in range(2,10)]
print mappings
# convert columns 2-9 to binary (one-hot) encoding
from pyspark.mllib.regression import LabeledPoint
import numpy as np
def extract_feature(line):
lst = []
for ind in range(2,10):
dic = mappings[ind-2]
for i,j in dic.iteritems():
if line[ind] == i:
lst.append(1.0)
else:
lst.append(0)
for ind in range(10,14):
lst.append(line[ind])
return np.array(map(float,lst))
def extract_label(line):
return float(line[-1])
binarydata = data.map(lambda x : LabeledPoint(extract_label(x), extract_feature(x)))
print binarydata.first()
print 'Raw data: '+ str(data.first()[2:])
print 'Raw Label: ' + str(data.first()[-1])
print 'Binary data: ' + str(binarydata.first().features)
print 'Binary label: '+ str(binarydata.first().label)
print 'Features number: ' + str(len(binarydata.first().features))
```
The decision tree can use the raw data directly, so we only need to convert the u'1'-style strings to float and wrap them in a NumPy array.
```
def extract_float(line):
    return np.array(line[2:14], dtype=float)
treedata = data.map(lambda x: LabeledPoint(extract_label(x), extract_float(x)))
print 'Raw data: '+ str(data.first()[2:])
print 'Raw Label: ' + str(data.first()[-1])
print 'Decision tree data: ' + str(treedata.first().features)
print 'Decision tree Label: ' + str(treedata.first().label)
```
Train the linear regression model and the decision tree model.
```
from pyspark.mllib.regression import LinearRegressionWithSGD
linear_model = LinearRegressionWithSGD.train(binarydata, iterations = 10, step = 0.1, intercept = False)
linear_pred_act = binarydata.map(lambda x: (linear_model.predict(x.features),x.label))
print 'linear predictions vs actual:'
print linear_pred_act.take(5)
from pyspark.mllib.tree import DecisionTree
tree_model = DecisionTree.trainRegressor(treedata,{})
preds = tree_model.predict(treedata.map(lambda x:x.features))
actual = treedata.map(lambda x: x.label)
tree_pred_act = preds.zip(actual)
print tree_pred_act.take(5)
print tree_model.depth()
print tree_model.numNodes()
```
Evaluate the performance of the models using the mean squared error, the mean absolute error and the root mean squared log error (RMSLE).
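For reference, the `square_log_error` helper defined in the next cell corresponds to the root mean squared log error

$$
\mathrm{RMSLE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}\bigl(\log(p_i + 1) - \log(a_i + 1)\bigr)^2},
$$

where $p_i$ are the predicted counts and $a_i$ the actual counts.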
```
def squared_error(i,j):
return (i-j)**2
def abs_error(i,j):
return np.abs(i-j)
def square_log_error(i,j):
return(np.log(i+1) - np.log(j+1))**2
mse = linear_pred_act.map(lambda (i,j): squared_error(i,j)).mean()
mae = linear_pred_act.map(lambda (i,j): abs_error(i,j)).mean()
rmsle = np.sqrt(linear_pred_act.map(lambda (i,j): square_log_error(i,j)).mean())
print 'mean squared error: '+ str(mse)
print 'mean absolute error: ' + str(mae)
print 'root mean squared log error: ' + str(rmsle)
tree_mse = tree_pred_act.map(lambda (i,j): squared_error(i,j)).mean()
tree_mae = tree_pred_act.map(lambda (i,j): abs_error(i,j)).mean()
tree_rmsle = np.sqrt(tree_pred_act.map(lambda (i,j): square_log_error(i,j)).mean())
print 'decision tree mean squared error: '+ str(tree_mse)
print 'decision tree mean absolute error: ' + str(tree_mae)
print 'decision tree root mean squared log error: ' + str(tree_rmsle)
import matplotlib.pyplot as plt
targets = data.map(lambda x:float(x[-1])).collect()
plt.hist(targets,bins = 40,normed = True,color = 'lightblue')
```
Try log and square-root transforms of the target.
```
plt.hist(np.log(targets),bins = 40,normed = True,color = 'lightblue')
plt.hist(np.sqrt(targets),bins = 40,normed = True,color = 'lightblue')
```
Next, fit the models to the log-transformed target.
```
binarydata_log = binarydata.map(lambda x: LabeledPoint(np.log(x.label), x.features))
linear_model_log = LinearRegressionWithSGD.train(binarydata_log, iterations = 10, step = 0.1, intercept = False)
linear_pred_act_log = binarydata_log.map(lambda x: (np.exp(linear_model_log.predict(x.features)),np.exp(x.label)))
print 'linear predictions vs actual:'
print linear_pred_act_log.take(5)
mse = linear_pred_act_log.map(lambda (i,j): squared_error(i,j)).mean()
mae = linear_pred_act_log.map(lambda (i,j): abs_error(i,j)).mean()
rmsle = np.sqrt(linear_pred_act_log.map(lambda (i,j): square_log_error(i,j)).mean())
print 'mean squared error: '+ str(mse)
print 'mean absolute error: ' + str(mae)
print 'root mean squared log error: ' + str(rmsle)
treedata_log = treedata.map(lambda x:LabeledPoint(np.log(x.label), x.features))
tree_model_log = DecisionTree.trainRegressor(treedata_log,{})
preds_log = tree_model_log.predict(treedata_log.map(lambda x:x.features)).map(lambda x: np.exp(x))
actual_log = treedata_log.map(lambda x: x.label).map(lambda x: np.exp(x))
tree_pred_act_log = preds_log.zip(actual_log)
tree_mse_log = tree_pred_act_log.map(lambda (i,j): squared_error(i,j)).mean()
tree_mae_log = tree_pred_act_log.map(lambda (i,j): abs_error(i,j)).mean()
tree_rmsle_log = np.sqrt(tree_pred_act_log.map(lambda (i,j): square_log_error(i,j)).mean())
print 'decision tree mean squared error: '+ str(tree_mse_log)
print 'decision tree mean absolute error: ' + str(tree_mae_log)
print 'decision tree root mean squared log error: ' + str(tree_rmsle_log)
```
The log transform does not change the model performance much.
Create a training set and a test set, and test how different parameters affect model performance.
```
data_with_index = binarydata.zipWithIndex().map(lambda (i,j): (j,i))
test = data_with_index.sample(False,0.2,42)
train = data_with_index.subtractByKey(test)
train_data = train.map(lambda (i,j):j)
test_data = test.map(lambda (i,j):j)
train_size = train_data.count()
test_size = test_data.count()
print "Training data size: %d" % train_size
print "Test data size: %d" % test_size
tree_data_with_index = treedata.zipWithIndex().map(lambda (i,j): (j,i))
tree_test = tree_data_with_index.sample(False,0.2,42)
tree_train = tree_data_with_index.subtractByKey(tree_test)
# use the decision-tree split here, not the binary-encoded one
tree_train_data = tree_train.map(lambda (i,j):j)
tree_test_data = tree_test.map(lambda (i,j):j)
def evaluate(train,test,iterations ,step,regParam,regType,intercept):
model = LinearRegressionWithSGD.train(train,iterations,step,regParam = regParam,regType = regType, intercept = intercept)
tp = test.map(lambda x:(x.label,model.predict(x.features)))
rmsle = np.sqrt(tp.map(lambda(i,j): square_log_error(i,j)).mean())
return rmsle
params = [1,5,10,20,50,100,200,500,1000]
metrics = [evaluate(train_data,test_data,param,0.01,0.0,'l2',False) for param in params]
print params
print metrics
plt.plot(np.log(params),metrics)
params = [0.01,0.025,0.05,0.1,1]
metrics = [evaluate(train_data,test_data,10,param,0.0,'l2',False) for param in params]
print params
print metrics
plt.plot(np.log(params),metrics)
params = [0.0,0.01,0.1,1.0,5.0,10.0,20.0]
metrics = [evaluate(train_data,test_data,10,0.1,param,'l2',False) for param in params]
print params
print metrics
plt.plot(np.log(params),metrics)
params = [0.0,0.01,0.1,1.0,10.0,100.0,1000.0]
metrics = [evaluate(train_data,test_data,10,0.1,param,'l1',False) for param in params]
print params
print metrics
plt.plot(np.log(params),metrics)
params = [False,True]
metrics = [evaluate(train_data,test_data,10,0.1,1.0,'l1',param) for param in params]
print params
print metrics
plt.bar(params,metrics,color = 'lightblue')
```
The linear regression model's RMSLE remains fairly large, so we try tuning the decision tree parameters instead.
```
def tree_evaluate(train,test,maxDepth,maxBins):
model = DecisionTree.trainRegressor(train,{},impurity = 'variance', maxDepth = maxDepth, maxBins = maxBins)
preds = model.predict(test.map(lambda x:x.features))
actual = test.map(lambda x:x.label)
tp = actual.zip(preds)
rmsle = np.sqrt(tp.map(lambda (i,j): square_log_error(i,j)).mean())
return rmsle
params = [1,2,3,4,5,10,20]
metrics = [tree_evaluate(tree_train_data,tree_test_data,param,32) for param in params]
print params
print metrics
plt.plot(params,metrics)
params = [2,4,8,16,32,64,100]
metrics = [tree_evaluate(tree_train_data,tree_test_data,20,param) for param in params]
print params
print metrics
plt.plot(params,metrics)
```
# Chapter 2: Tests
In this chapter, we explain the main idea behind test classes `InteractiveTest`, `BatchTest` and `BatchTestWithSplit`.
## Test Class Interface
The test class interface is defined by the `cognibench.testing.CNBTest` class, which defines the functions to be implemented by concrete test classes and provides the base implementation for multi-subject testing, in addition to helper functionality such as data and log persistence.
```
from cognibench.testing import CNBTest
print('CNBTest documentation')
print('-----------------------------')
print(CNBTest.__doc__)
```
## Interactive Tests
`InteractiveTest` is a test class which **requires** the models to implement the `cognibench.capabilities.Interactive` interface. Such models implement the methods that allow them to be trained in a continuous manner. This is in contrast to models that must first be trained, and can then only predict actions for stimuli with no further training.
```
from cognibench.testing import InteractiveTest
print('InteractiveTest documentation')
print('-----------------------------')
print(InteractiveTest.__doc__)
# models are required to implement cognibench.capabilities.Interactive
InteractiveTest.required_capabilities
```
`InteractiveTest` class overrides the `predict_single` and `compute_score_single` methods of `cognibench.testing.CNBTest` class to test a given model interactively. It requires three sets of observations:
* stimuli
* actions
* rewards
## Batch Tests
`BatchTest` is a test class which generates test predictions from a model by providing all the samples together. Since this is a very common way of testing models, `cognibench` offers such a test class as well.
```
from cognibench.testing import BatchTest
print('BatchTest documentation')
print('-----------------------------')
print(BatchTest.__doc__)
```
## Batch Tests With Splitting Functionality
By default, `cognibench` testing classes use the same samples for both model optimization and prediction generation. However, this behaviour can easily be modified by overriding the `get_fitting_observations_single` and `get_testing_observations_single` methods, which is exactly what the `BatchTestWithSplit` class does. If you want to use separate samples for model fitting and prediction generation, you can use the `BatchTestWithSplit` class.
```
from cognibench.testing import BatchTestWithSplit
print('BatchTestWithSplit documentation')
print('-----------------------------')
print(BatchTestWithSplit.__doc__)
```
## Implementing Your Own Testing Logic
In case the testing styles provided by `cognibench` do not satisfy your requirements, you can implement your own testing logic by subclassing `CNBTest` and defining how a single-subject model should be tested. This has the benefit that your class will automatically have multi-subject testing functionality as well. If you want to follow this route, please look at the implementations of the concrete test classes above, which can be found in `cognibench/testing/tests.py`.
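As a rough illustration, a custom test could start from a skeleton like the one below. The method names come from the discussion above, but the parameter names and signatures are assumptions for illustration only; check `cognibench/testing/tests.py` for the actual interface.
```
from cognibench.testing import CNBTest


class MyCustomTest(CNBTest):
    """Hypothetical skeleton; the parameter names below are assumptions,
    not the documented API -- see cognibench/testing/tests.py."""

    def predict_single(self, model, observations):
        # Generate predictions for a single subject however your testing logic requires.
        raise NotImplementedError

    def compute_score_single(self, observations, predictions):
        # Turn the single-subject predictions into a score.
        raise NotImplementedError
```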
### Exercice 1:
Write a function that takes a string, and two indexes (integers), and returns a slice of that string as well as the length of the string.
Call the function, and store the results in two variables
```
def slice2str(a,b,c):
if(b<c):
return a[b:c+1]
else:
return a[c:b+1]
slice2str("hello world",2,7)
```
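The solution above returns only the slice; the prompt also asks for the length of the string and for storing both results. A small variant covering that:
```
def slice_and_length(s, start, end):
    # Return the requested slice together with the length of the full string.
    return s[start:end+1], len(s)

the_slice, the_length = slice_and_length("hello world", 2, 7)
print(the_slice, the_length)
```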
### Exercice 2:
1. Write a function that takes two arguments, and returns the sum of the two.
2. Call the function with the values 1 and 2.1, then check the type of the result.
3. Call the function with the values False and 2, then check the type of the result.
```
def sum(m,n):
    return m+n
print(sum(1,2.1))
print(type(sum(1,2.1)))    # float
print(sum(False,2))
print(type(sum(False,2)))  # int: False behaves as 0
```
### Exercice 3:
Write a function that takes a string and returns a dictionary containing character frequency.
*example*
string sample: 'test'
expected result: {'t':2, 'e':1, 's':1}
```
def dictionary(string):
    Dict = {}
    for i in string:
        if i in Dict:
            Dict[i] += 1
        else:
            Dict[i] = 1
    return Dict
print(dictionary('test'))
```
### Exercice 4:
Given two lists, iterate both lists simultaneously, such that the first list displays its items in the original order and the second list in reverse order.
*example*:
```
from lib import print_orl  # this is a lib you don't have access to
print_orl([1,2,3],[1,2,3])
def reverse(L):
n=len(L)
for i in range (0,n):
print (L[i],'',L[n-i-1])
reverse([1,2,3])
```
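The prompt asks for two lists iterated together; a sketch using `zip()` and `reversed()`:
```
def print_forward_and_reverse(list1, list2):
    # Walk list1 in its original order and list2 in reverse order, pairwise.
    for a, b in zip(list1, reversed(list2)):
        print(a, b)

print_forward_and_reverse([1, 2, 3], [1, 2, 3])
```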
### Exercice 5:
Given two 2d arrays, stack them horizontally.
*example:*
```
import numpy as np
#The input
a = np.ones((1, 10)).reshape(2, -1)
b = np.zeros((1, 10)).reshape(2, -1)
#Try to print input
print(a)
print(b)
#concatenate horizontally
def concat(a,b):
C=np.zeros((1,20)).reshape(2,-1)
k=0
for i in range(5):
C[k][i]=a[k][i]
C[k][i+5]=b[k][i]
k=1
for j in range(5):
C[k][j]=a[k][j]
C[k][j+5]=b[k][j]
return C
concat(a,b)
```
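NumPy already provides horizontal stacking, so the hand-rolled `concat` above can be replaced by a single call:
```
import numpy as np

a = np.ones((1, 10)).reshape(2, -1)
b = np.zeros((1, 10)).reshape(2, -1)
print(np.hstack((a, b)))  # same result as concat(a, b); np.concatenate((a, b), axis=1) is equivalent
```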
### Exercice 6:
Create the following patterns using the arrays a and b below, and numpy functions:
```
import numpy as np
a = np.arange(3)
b = np.ones((3,3,3))
print(a)
print(b)
print('output1')
c=[]
for i in range(3):
c.append(0)
for i in range(3):
c.append(1)
for i in range(3):
c.append(2)
for i in range (3):
for k in range(3):
c.append(a[k])
print(c)
print('output2')
d=np.ones((3,3,3))
for i in range (3):
for j in range(3):
k = 1
d[i][j][k] = d[i][j][k] + 1
k = 2
d[i][j][k] = d[i][j][k] + 2
print(d)
print('output3')
b=b.reshape(3,9)
print(b)
```
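The same three patterns can also be produced with NumPy functions instead of explicit loops (derived from the outputs above):
```
import numpy as np

a = np.arange(3)
b = np.ones((3, 3, 3))

out1 = np.concatenate([np.repeat(a, 3), np.tile(a, 3)])  # [0 0 0 1 1 1 2 2 2 0 1 2 0 1 2 0 1 2]
out2 = b + a                                             # broadcasting adds [0, 1, 2] along the last axis
out3 = b.reshape(3, 9)                                   # flatten the last two axes
print(out1)
print(out2)
print(out3)
```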
# Lecture 2: Quantifying Uncertainties in Physical Models
> Ignorance is preferable to error and he is less remote from the truth who believes nothing than he who believes what is wrong.
Thomas Jefferson (1781)
## Objectives
+ To tell the difference between **aleatory** and **epistemic** uncertainties.
+ To define **predictive modeling**.
+ To use **probability theory** to represent both aleatory and epistemic uncertainties.
+ To **propagate uncertainty** through a physical model using Monte Carlo.
## Readings
+ [Oden, Moser, Ghattas, Computer Predictions with Quantified Uncertainty, Part I](http://www.siam.org/pdf/news/1842.pdf)
+ [Oden, Moser, Ghattas, Computer Predictions with Quantified Uncertainty, Part II](http://www.siam.org/pdf/news/1857.pdf)
## Definitions
We are not going to make a big effort to be consistent about the use of the following terms, since their precise meaning is still under debate.
### Uncertainty
In general, we are uncertain about a logical proposition if we do not know whether it is true or false.
In particular, we can be uncertain about:
+ the value of a model parameter;
+ the mathematical form of a model;
+ the initial conditions of an ordinary differential equation;
+ the boundary conditions of a partial differential equation;
+ the value of an experimental measurement we are about to perform;
+ etc.
Uncertainty may be *aleatory* or *epistemic*. Aleatory uncertainty is associated with inherent system randomness. Epistemic uncertainty is associated with lack of knowledge. If you think too hard, the distinction between the two becomes philosophical. We are not going to push this too hard. Fortunately, our approach (the Bayesian approach) treats both uncertainties on an equal footing.
### Predictive Modeling
*Predictive modeling* is the process of assigning error bars to the predictions of computational models.
Ideally, these error bars rigorously quantify the effect of all associated uncertainties.
Having quantified and propagated uncertainties through the computational models, one can assess the risk of making decisions based on the model predictions.
## Example: Catalytic Conversion of Nitrate to Nitrogen
This is Example 3.1 of [(Tsilifis, 2014)](http://arxiv.org/abs/1410.5522).
Consider the catalytic
conversion of nitrate ($\mbox{NO}_3^-$) to nitrogen ($\mbox{N}_2$) and other
by-products by electrochemical means.
The mechanism that is followed is complex and not well understood.
The experiment of [(Katsounaros, 2012)](http://www.sciencedirect.com/science/article/pii/S0013468612005208) confirmed the
production of nitrogen ($\mbox{N}_2$), ammonia
($\mbox{NH}_3$), and nitrous oxide ($\mbox{N}_2\mbox{O}$) as final products
of the reaction, as well as the intermediate production of nitrite ($\mbox{NO}_2^-$).
The data are reproduced in [Comma-separated values](https://en.wikipedia.org/wiki/Comma-separated_values) (CSV) and stored in
[data/catalysis.csv](data/catalysis.csv).
The time is measured in minutes and the concentrations are measured in $\mbox{mmol}\cdot\mbox{L}^{-1}$.
Let's load the data into this notebook using the [Pandas](http://pandas.pydata.org) Python module:
```
# If this fails, you haven't uploaded "catalysis.csv".
# Repeat step 11 of the instructions.
import pandas as pd
catalysis_data = pd.read_csv('catalysis.csv', index_col=0)
catalysis_data
```
Let's visualize the data using [Matplotlib](http://matplotlib.org):
```
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
catalysis_data.plot()
```
The theory of catalytic reactions guarantees that the total mass must be conserved.
However, this is not the case in our dataset:
```
catalysis_data.sum(axis=1)
```
This inconsistency suggests the existence of an intermediate unobserved reaction product X.
[(Katsounaros, 2012)](http://www.sciencedirect.com/science/article/pii/S0013468612005208) suggested the reaction path shown in the following figure.

The dynamical system associated with the reaction is:
$$
\begin{array}{cc}
\frac{d \left[\mbox{NO}_3^-\right]}{dt} &= -k_1\left[\mbox{NO}_3^-\right], \\
\frac{d\left[\mbox{NO}_2^-\right]}{dt} &= k_1\left[\mbox{NO}_3^-\right] - (k_2 + k_4 +
k_5)[\mbox{NO}_2^-], \\
\frac{d \left[\mbox{X}\right]}{dt} &= k_2 \left[\mbox{NO}_2^-\right] - k_3 [X],\\
\frac{d \left[\mbox{N}_2\right]}{dt} &= k_3 \left[\mbox{X}\right], \\
\frac{d \left[\mbox{NH}_3\right]}{dt} &= k_4 \left[\mbox{NO}_2^-\right],\\
\frac{d \left[\mbox{N}_2O\right]}{dt} &= k_5 \left[\mbox{NO}_2^-\right],
\end{array}
$$
where $[\cdot]$ denotes the concentration of a quantity, and
$k_i > 0$, $i=1,...5$ are the *kinetic rate constants*.
### Questions 01
[Click here to respond](http://ebilionis.polldaddy.com/s/handout-01-01)
### Computational Model
We will develop a generic computational model for the solution of dynamical systems and we will use it to study the catalysis problem. The code relies on the [Fourth-order Runge-Kutta method](https://en.wikipedia.org/wiki/Runge–Kutta_methods) and is a modified copy of [http://www.math-cs.gordon.edu/courses/ma342/python/diffeq.py](http://www.math-cs.gordon.edu/courses/ma342/python/diffeq.py) developed by Jonathan Senning. The code solves:
$$
\begin{array}{ccc}
\dot{\mathbf{y}} &=& f(\mathbf{y}, t),\\
\mathbf{y}(0) &=& \mathbf{y}_0.
\end{array}
$$
```
import numpy as np
def rk45( f, y0, t, args=() ):
"""Fourth-order Runge-Kutta method with error estimate.
USAGE:
y = rk45(f, x0, t, args=())
INPUT:
f - function of x and t equal to dx/dt. x may be multivalued,
in which case it should a list or a NumPy array. In this
case f must return a NumPy array with the same dimension
as x.
y0 - the initial condition(s). Specifies the value of x when
t = t[0]. Can be either a scalar or a list or NumPy array
if a system of equations is being solved.
t - list or NumPy array of t values to compute solution at.
t[0] is the the initial condition point, and the difference
h=t[i+1]-t[i] determines the step size h.
args - any other parameters of the function f.
OUTPUT:
y - NumPy array containing solution values corresponding to each
entry in t array. If a system is being solved, x will be
an array of arrays.
NOTES:
This version is based on the algorithm presented in "Numerical
Mathematics and Computing" 6th Edition, by Cheney and Kincaid,
Brooks-Cole, 2008.
"""
# Coefficients used to compute the independent variable argument of f
c20 = 2.500000000000000e-01 # 1/4
c30 = 3.750000000000000e-01 # 3/8
c40 = 9.230769230769231e-01 # 12/13
c50 = 1.000000000000000e+00 # 1
c60 = 5.000000000000000e-01 # 1/2
# Coefficients used to compute the dependent variable argument of f
c21 = 2.500000000000000e-01 # 1/4
c31 = 9.375000000000000e-02 # 3/32
c32 = 2.812500000000000e-01 # 9/32
c41 = 8.793809740555303e-01 # 1932/2197
c42 = -3.277196176604461e+00 # -7200/2197
c43 = 3.320892125625853e+00 # 7296/2197
c51 = 2.032407407407407e+00 # 439/216
c52 = -8.000000000000000e+00 # -8
c53 = 7.173489278752436e+00 # 3680/513
c54 = -2.058966861598441e-01 # -845/4104
c61 = -2.962962962962963e-01 # -8/27
c62 = 2.000000000000000e+00 # 2
c63 = -1.381676413255361e+00 # -3544/2565
c64 = 4.529727095516569e-01 # 1859/4104
c65 = -2.750000000000000e-01 # -11/40
# Coefficients used to compute 4th order RK estimate
a1 = 1.157407407407407e-01 # 25/216
a2 = 0.000000000000000e-00 # 0
a3 = 5.489278752436647e-01 # 1408/2565
a4 = 5.353313840155945e-01 # 2197/4104
a5 = -2.000000000000000e-01 # -1/5
b1 = 1.185185185185185e-01 # 16.0/135.0
b2 = 0.000000000000000e-00 # 0
b3 = 5.189863547758284e-01 # 6656.0/12825.0
b4 = 5.061314903420167e-01 # 28561.0/56430.0
b5 = -1.800000000000000e-01 # -9.0/50.0
b6 = 3.636363636363636e-02 # 2.0/55.0
n = len( t )
y = np.array( [ y0 ] * n )
for i in xrange( n - 1 ):
h = t[i+1] - t[i]
k1 = h * f( y[i], t[i], *args )
k2 = h * f( y[i] + c21 * k1, t[i] + c20 * h, *args )
k3 = h * f( y[i] + c31 * k1 + c32 * k2, t[i] + c30 * h, *args )
k4 = h * f( y[i] + c41 * k1 + c42 * k2 + c43 * k3, t[i] + c40 * h, *args )
k5 = h * f( y[i] + c51 * k1 + c52 * k2 + c53 * k3 + c54 * k4, \
t[i] + h, *args )
k6 = h * f( \
y[i] + c61 * k1 + c62 * k2 + c63 * k3 + c64 * k4 + c65 * k5, \
t[i] + c60 * h, *args )
y[i+1] = y[i] + a1 * k1 + a3 * k3 + a4 * k4 + a5 * k5
y5 = y[i] + b1 * k1 + b3 * k3 + b4 * k4 + b5 * k5 + b6 * k6
return y
```
## Calibrating the Catalysis Model to the Experimental Data
Now that we are certain that our generic ODE solver works, let us use it to develop a solver for the catalysis model. All we need to do is define the right-hand side of the dynamics:
```
def f_catalysis(y, t, kappa):
rhs = np.zeros((6,))
rhs[0] = -kappa[0] * y[0]
rhs[1] = kappa[0] * y[0] - (kappa[1] + kappa[3] + kappa[4]) * y[1]
rhs[2] = kappa[1] * y[1] - kappa[2] * y[2]
rhs[3] = kappa[2] * y[2]
rhs[4] = kappa[3] * y[1]
rhs[5] = kappa[4] * y[1]
return rhs
```
Let's try to calibrate the parameters of the model to the data manually. Because the raw parameters are very small, let us work with the transformed version:
$$
\xi_i = \log\left(180k_i\right).
$$
```
from ipywidgets import interactive
def compare_model_to_data(xi1 = 1.359, xi2 = 1.657, xi3 = 1.347, xi4 = -.162, xi5 = -1.009):
"""
Compare the model predictions to the data.
"""
t = np.linspace(0, 180, 100)
kappa = np.exp([xi1, xi2, xi3, xi4, xi5]) / 180.
y = rk45(f_catalysis, (500., 0., 0., 0., 0., 0.), t, args=(kappa,))
fig, ax = plt.subplots(figsize=(10, 10))
catalysis_data.plot(ax=ax, style='s')
ax.plot(t, y[:, 0], color=sns.color_palette()[0], label='Model NO3-')
ax.plot(t, y[:, 1], color=sns.color_palette()[1], label='Model NO2-')
ax.plot(t, y[:, 2], color=sns.color_palette()[5], label='Model X')
ax.plot(t, y[:, 3], color=sns.color_palette()[2], label='Model N2')
ax.plot(t, y[:, 4], color=sns.color_palette()[3], label='Model NH3')
ax.plot(t, y[:, 5], color=sns.color_palette()[4], label='Model N2O')
plt.legend()
interactive(compare_model_to_data, xi1 = (-2, 2, 0.05), xi2 = (-2, 2, 0.05), xi3 = (-2, 2, 0.05),
xi4 = (-2, 2, 0.05), xi5 = (-2, 2, 0.05) )
```
This is the calibration problem.
### Questions 02
[Click to respond.](http://ebilionis.polldaddy.com/s/handout-01-02)
### Uncertainty Propagation
As discussed in Question 2 above, there are various reasons why a model cannot be calibrated perfectly. Some of these are:
+ lack of data;
+ the existence of measurement noise;
+ the fact that the model is just not perfect.
Ignoring for the moment the possibility that the model is just plainly wrong, we see that the lack of data or the presence of noise will induce some uncertainty in the values of the calibrated parameters. We are going to represent uncertainty in the parameters by assigning a probability density to them. There are systematic ways of estimating the uncertainty induced by the calibration process, but this will not concern us now.
For the moment, assume that somebody told us that the uncertainty in the scaled parameters $\xi_i$ of the model is as follows:
| Variable | Value |
|---------|------------------|
| $\xi_1$ |$1.35\pm 0.05$ |
| $\xi_2$ |$1.65\pm 0.08$ |
| $\xi_3$ |$1.34\pm 0.11$ |
| $\xi_4$ |$-0.16\pm 0.16$ |
| $\xi_5$ |$-3.84\pm 0.20$ |
But what does this information actually mean? As we will discuss in the following lectures, this information can be used to assign a probability density on each one of these parameters, say $p(\xi_i)$, that *models* our state of knowledge about them. For example, let us assume that our state of knowledge about $\xi_1$ is given by a Gaussian probability density:
$$
p(\xi_1) = \mathcal{N}(\xi_1|\mu_1=1.35, \sigma^2 = 0.05^2),
$$
which we can visualize as follows:
```
import scipy.stats
from scipy.stats import norm
xi1 = np.linspace(-0, 2, 200)
plt.plot(xi1, norm.pdf(xi1, loc=1.35, scale=0.05))
```
This means that we do not believe that the value of the parameter can be less than 1.0 or greater than 1.6. Note that we are deliberately trying to avoid the use of the term "random". There is nothing random in our example. Probability models a state of knowledge.
How does this uncertainty propagate through the model? We will study this question with a simple numerical experiment. We are going to assign Gaussian probability densities on all the $\xi_i$'s, sample them a few times, and run our catalysis model for each one.
```
def plot_samples(mu1 = 1.359, sig1=0.055,
mu2 = 1.657, sig2=0.086,
mu3 = 1.347, sig3=0.118,
mu4 = -.162, sig4=0.167,
mu5 = -1.009, sig5=0.368,
num_samples=1):
"""
Take a few samples of the model to study uncertainty propagation.
"""
fig, ax = plt.subplots(figsize=(10, 10))
catalysis_data.plot(ax=ax, style='s')
t = np.linspace(0, 180, 100)
for i in xrange(num_samples):
xi1 = norm.rvs(loc=mu1, scale=sig1)
xi2 = norm.rvs(loc=mu2, scale=sig2)
xi3 = norm.rvs(loc=mu3, scale=sig3)
xi4 = norm.rvs(loc=mu4, scale=sig4)
xi5 = norm.rvs(loc=mu5, scale=sig5)
kappa = np.exp([xi1, xi2, xi3, xi4, xi5]) / 180.
y = rk45(f_catalysis, (500., 0., 0., 0., 0., 0.), t, args=(kappa,))
ax.plot(t, y[:, 0], linewidth=0.5, color=sns.color_palette()[0])#, label='Model NO3-')
ax.plot(t, y[:, 1], linewidth=0.5, color=sns.color_palette()[1])#, label='Model NO2-')
ax.plot(t, y[:, 2], linewidth=0.5, color=sns.color_palette()[5])#, label='Model X')
ax.plot(t, y[:, 3], linewidth=0.5, color=sns.color_palette()[2])#, label='Model N2')
ax.plot(t, y[:, 4], linewidth=0.5, color=sns.color_palette()[3])#, label='Model NH3')
ax.plot(t, y[:, 5], linewidth=0.5, color=sns.color_palette()[4])#, label='Model N2O')
plt.legend()
interactive(plot_samples, mu1 = (-2, 2, 0.05), sig1=(0.02, 0.4, 0.01),
mu2 = (-2, 2, 0.05), sig2=(0.02, 0.4, 0.01),
mu3 = (-2, 2, 0.05), sig3=(0.02, 0.4, 0.01),
mu4 = (-2, 2, 0.05), sig4=(0.02, 0.4, 0.01),
mu5 = (-2, 2, 0.05), sig5=(0.02, 0.4, 0.01),
num_samples=(1, 1100, 10))
```
### Questions 03
Increase the number of samples from 1, to 10, to 100, to 1000. Each time you get a better description of uncertainty. This is a Monte Carlo simulation. Then, take the survey.
[Click here to respond.](http://ebilionis.polldaddy.com/s/handout-01-03)
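If you want to go beyond eyeballing the sampled trajectories, the Monte Carlo samples can be summarized with a mean curve and a band of plus/minus two standard deviations. The sketch below is not part of the original lecture code; it simply reuses `rk45` and `f_catalysis` defined above, with the default means and standard deviations of `plot_samples`.
```
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt

t = np.linspace(0, 180, 100)
mus = np.array([1.359, 1.657, 1.347, -0.162, -1.009])
sigs = np.array([0.055, 0.086, 0.118, 0.167, 0.368])
num_samples = 200
samples = np.zeros((num_samples, t.shape[0]))
for i in range(num_samples):
    xi = norm.rvs(loc=mus, scale=sigs)             # one draw of the scaled parameters
    kappa = np.exp(xi) / 180.
    y = rk45(f_catalysis, (500., 0., 0., 0., 0., 0.), t, args=(kappa,))
    samples[i] = y[:, 0]                           # keep the NO3- concentration
m = samples.mean(axis=0)
s = samples.std(axis=0)
plt.plot(t, m, label='NO3- mean')
plt.fill_between(t, m - 2 * s, m + 2 * s, alpha=0.25, label='mean +/- 2 std')
plt.legend()
```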
<script type="text/javascript">
var _polldaddy = [] || _polldaddy;
_polldaddy.push( {
type: 'button',
title: 'Click here to respond.',
style: 'inline',
domain: 'ebilionis.polldaddy.com/s/',
id: 'handout-01-03',
placeholder: 'pd1514457576895'
} );
(function(d,c,j){if(!document.getElementById(j)){var pd=d.createElement(c),s;pd.id=j;pd.src=('https:'==document.location.protocol)?'https://polldaddy.com/survey.js':'http://i0.poll.fm/survey.js';s=document.getElementsByTagName(c)[0];s.parentNode.insertBefore(pd,s);}}(document,'script','pd-embed'));
</script>
|
github_jupyter
|
# If this fails, you haven't uploaded "catalysis.csv".
# Repeat 11 of the instructions.
import pandas as pd
catalysis_data = pd.read_csv('catalysis.csv', index_col=0)
catalysis_data
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
catalysis_data.plot()
catalysis_data.sum(axis=1)
import numpy as np
def rk45( f, y0, t, args=() ):
"""Fourth-order Runge-Kutta method with error estimate.
USAGE:
y = rk45(f, x0, t, args=())
INPUT:
f - function of x and t equal to dx/dt. x may be multivalued,
in which case it should a list or a NumPy array. In this
case f must return a NumPy array with the same dimension
as x.
y0 - the initial condition(s). Specifies the value of x when
t = t[0]. Can be either a scalar or a list or NumPy array
if a system of equations is being solved.
t - list or NumPy array of t values to compute solution at.
t[0] is the the initial condition point, and the difference
h=t[i+1]-t[i] determines the step size h.
args - any other parameters of the function f.
OUTPUT:
y - NumPy array containing solution values corresponding to each
entry in t array. If a system is being solved, x will be
an array of arrays.
NOTES:
This version is based on the algorithm presented in "Numerical
Mathematics and Computing" 6th Edition, by Cheney and Kincaid,
Brooks-Cole, 2008.
"""
# Coefficients used to compute the independent variable argument of f
c20 = 2.500000000000000e-01 # 1/4
c30 = 3.750000000000000e-01 # 3/8
c40 = 9.230769230769231e-01 # 12/13
c50 = 1.000000000000000e+00 # 1
c60 = 5.000000000000000e-01 # 1/2
# Coefficients used to compute the dependent variable argument of f
c21 = 2.500000000000000e-01 # 1/4
c31 = 9.375000000000000e-02 # 3/32
c32 = 2.812500000000000e-01 # 9/32
c41 = 8.793809740555303e-01 # 1932/2197
c42 = -3.277196176604461e+00 # -7200/2197
c43 = 3.320892125625853e+00 # 7296/2197
c51 = 2.032407407407407e+00 # 439/216
c52 = -8.000000000000000e+00 # -8
c53 = 7.173489278752436e+00 # 3680/513
c54 = -2.058966861598441e-01 # -845/4104
c61 = -2.962962962962963e-01 # -8/27
c62 = 2.000000000000000e+00 # 2
c63 = -1.381676413255361e+00 # -3544/2565
c64 = 4.529727095516569e-01 # 1859/4104
c65 = -2.750000000000000e-01 # -11/40
# Coefficients used to compute 4th order RK estimate
a1 = 1.157407407407407e-01 # 25/216
a2 = 0.000000000000000e-00 # 0
a3 = 5.489278752436647e-01 # 1408/2565
a4 = 5.353313840155945e-01 # 2197/4104
a5 = -2.000000000000000e-01 # -1/5
b1 = 1.185185185185185e-01 # 16.0/135.0
b2 = 0.000000000000000e-00 # 0
b3 = 5.189863547758284e-01 # 6656.0/12825.0
b4 = 5.061314903420167e-01 # 28561.0/56430.0
b5 = -1.800000000000000e-01 # -9.0/50.0
b6 = 3.636363636363636e-02 # 2.0/55.0
n = len( t )
y = np.array( [ y0 ] * n )
for i in range( n - 1 ):
h = t[i+1] - t[i]
k1 = h * f( y[i], t[i], *args )
k2 = h * f( y[i] + c21 * k1, t[i] + c20 * h, *args )
k3 = h * f( y[i] + c31 * k1 + c32 * k2, t[i] + c30 * h, *args )
k4 = h * f( y[i] + c41 * k1 + c42 * k2 + c43 * k3, t[i] + c40 * h, *args )
k5 = h * f( y[i] + c51 * k1 + c52 * k2 + c53 * k3 + c54 * k4, \
t[i] + h, *args )
k6 = h * f( \
y[i] + c61 * k1 + c62 * k2 + c63 * k3 + c64 * k4 + c65 * k5, \
t[i] + c60 * h, *args )
y[i+1] = y[i] + a1 * k1 + a3 * k3 + a4 * k4 + a5 * k5
y5 = y[i] + b1 * k1 + b3 * k3 + b4 * k4 + b5 * k5 + b6 * k6  # 5th-order estimate (unused; comparing it with y[i+1] would give an error estimate)
return y
def f_catalysis(y, t, kappa):
rhs = np.zeros((6,))
rhs[0] = -kappa[0] * y[0]
rhs[1] = kappa[0] * y[0] - (kappa[1] + kappa[3] + kappa[4]) * y[1]
rhs[2] = kappa[1] * y[1] - kappa[2] * y[2]
rhs[3] = kappa[2] * y[2]
rhs[4] = kappa[3] * y[1]
rhs[5] = kappa[4] * y[1]
return rhs
from ipywidgets import interactive
def compare_model_to_data(xi1 = 1.359, xi2 = 1.657, xi3 = 1.347, xi4 = -.162, xi5 = -1.009):
"""
Compare the model predictions to the data.
"""
t = np.linspace(0, 180, 100)
kappa = np.exp([xi1, xi2, xi3, xi4, xi5]) / 180.
y = rk45(f_catalysis, (500., 0., 0., 0., 0., 0.), t, args=(kappa,))
fig, ax = plt.subplots(figsize=(10, 10))
catalysis_data.plot(ax=ax, style='s')
ax.plot(t, y[:, 0], color=sns.color_palette()[0], label='Model NO3-')
ax.plot(t, y[:, 1], color=sns.color_palette()[1], label='Model NO2-')
ax.plot(t, y[:, 2], color=sns.color_palette()[5], label='Model X')
ax.plot(t, y[:, 3], color=sns.color_palette()[2], label='Model N2')
ax.plot(t, y[:, 4], color=sns.color_palette()[3], label='Model NH3')
ax.plot(t, y[:, 5], color=sns.color_palette()[4], label='Model N2O')
plt.legend()
interactive(compare_model_to_data, xi1 = (-2, 2, 0.05), xi2 = (-2, 2, 0.05), xi3 = (-2, 2, 0.05),
xi4 = (-2, 2, 0.05), xi5 = (-2, 2, 0.05) )
import scipy.stats
from scipy.stats import norm
xi1 = np.linspace(-0, 2, 200)
plt.plot(xi1, norm.pdf(xi1, loc=1.35, scale=0.05))
def plot_samples(mu1 = 1.359, sig1=0.055,
mu2 = 1.657, sig2=0.086,
mu3 = 1.347, sig3=0.118,
mu4 = -.162, sig4=0.167,
mu5 = -1.009, sig5=0.368,
num_samples=1):
"""
Take a few samples of the model to study uncertainty propagation.
"""
fig, ax = plt.subplots(figsize=(10, 10))
catalysis_data.plot(ax=ax, style='s')
t = np.linspace(0, 180, 100)
for i in range(num_samples):
xi1 = norm.rvs(loc=mu1, scale=sig1)
xi2 = norm.rvs(loc=mu2, scale=sig2)
xi3 = norm.rvs(loc=mu3, scale=sig3)
xi4 = norm.rvs(loc=mu4, scale=sig4)
xi5 = norm.rvs(loc=mu5, scale=sig5)
kappa = np.exp([xi1, xi2, xi3, xi4, xi5]) / 180.
y = rk45(f_catalysis, (500., 0., 0., 0., 0., 0.), t, args=(kappa,))
ax.plot(t, y[:, 0], linewidth=0.5, color=sns.color_palette()[0])#, label='Model NO3-')
ax.plot(t, y[:, 1], linewidth=0.5, color=sns.color_palette()[1])#, label='Model NO2-')
ax.plot(t, y[:, 2], linewidth=0.5, color=sns.color_palette()[5])#, label='Model X')
ax.plot(t, y[:, 3], linewidth=0.5, color=sns.color_palette()[2])#, label='Model N2')
ax.plot(t, y[:, 4], linewidth=0.5, color=sns.color_palette()[3])#, label='Model NH3')
ax.plot(t, y[:, 5], linewidth=0.5, color=sns.color_palette()[4])#, label='Model N2O')
plt.legend()
interactive(plot_samples, mu1 = (-2, 2, 0.05), sig1=(0.02, 0.4, 0.01),
mu2 = (-2, 2, 0.05), sig2=(0.02, 0.4, 0.01),
mu3 = (-2, 2, 0.05), sig3=(0.02, 0.4, 0.01),
mu4 = (-2, 2, 0.05), sig4=(0.02, 0.4, 0.01),
mu5 = (-2, 2, 0.05), sig5=(0.02, 0.4, 0.01),
num_samples=(1, 1100, 10))
| 0.508788 | 0.986777 |
```
#https://www.tensorflow.org/tutorials/keras/text_classification
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow import keras
!pip install -q tensorflow-datasets
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
import numpy as np
print(tf.__version__)
(train_data, test_data), info = tfds.load(
# Use the version pre-encoded with an ~8k vocabulary.
'imdb_reviews/subwords8k',
# Return the train/test datasets as a tuple.
split = (tfds.Split.TRAIN, tfds.Split.TEST),
# Return (example, label) pairs from the dataset (instead of a dictionary).
as_supervised=True,
# Also return the `info` structure.
with_info=True)
# dataset "info" includes the text encoder (a tfds.features.text.SubwordTextEncoder).
encoder = info.features['text'].encoder
print ('Vocabulary size: {}'.format(encoder.vocab_size))
# encoder can/will reversibly encode any string:
sample_string = 'Hello TensorFlow.'
encoded_string = encoder.encode(sample_string)
print ('Encoded string is {}'.format(encoded_string))
original_string = encoder.decode(encoded_string)
print ('The original string: "{}"'.format(original_string))
assert original_string == sample_string
# encoder encodes the string by breaking it into subwords/characters if the word is not in its dictionary
# thus: the more a string resembles the dataset the encoder was trained on, the shorter the encoded representation will be
for ts in encoded_string:
print ('{} ----> {}'.format(ts, encoder.decode([ts])))
# dataset is preprocessed - each example is array of int representing the words of the movie review content
for train_example, train_label in train_data.take(1):
print('Encoded text:', train_example[:10].numpy())
print('Label:', train_label.numpy())
# `info` structure contains the encoder/decoder ---> encoder can be used to recover the original text:
encoder.decode(train_example)
#prep data for training
# each batch will have a shape of (batch_size,sequence_length) due to dynamic padding length for each batch
# padded_batch is used to zero-pad the sequences while batching (due to the reviews being different lengths)
BUFFER_SIZE = 1000
train_batches = (
train_data
.shuffle(BUFFER_SIZE)
.padded_batch(32, train_data.output_shapes))
test_batches = (
test_data
.padded_batch(32, train_data.output_shapes))
for example_batch, label_batch in train_batches.take(2):
print("Batch shape:", example_batch.shape)
print("label shape:", label_batch.shape)
```
The layers are stacked sequentially to build the classifier:
1. The first layer is an Embedding layer. This layer takes the integer-encoded vocabulary and looks up the embedding vector for each word-index. These vectors are learned as the model trains. The vectors add a dimension to the output array. The resulting dimensions are: (batch, sequence, embedding).
2. Next, a GlobalAveragePooling1D layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This lets the model handle input of variable length in the simplest way possible (see the short sketch after this list).
3. In the original tutorial this fixed-length output vector is then piped through a fully-connected (Dense) layer with 16 hidden units; the model built below feeds the pooled vector directly into the output layer instead.
4. The last layer is densely connected with a single output node. Using the sigmoid activation function, this value is a float between 0 and 1, representing a probability, or confidence level.
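A minimal NumPy sketch of what the pooling step computes (illustrative only; the shapes and values below are made up and independent of the IMDB data):
```
import numpy as np

# pretend output of the Embedding layer: (batch, sequence, embedding) = (2, 4, 3)
embedded = np.arange(24, dtype=float).reshape(2, 4, 3)

# GlobalAveragePooling1D is just a mean over the sequence axis,
# producing one fixed-length vector per example regardless of sequence length
pooled = embedded.mean(axis=1)
print(pooled.shape)   # (2, 3)
```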
```
# build model
model = keras.Sequential([
keras.layers.Embedding(encoder.vocab_size, 16),
keras.layers.GlobalAveragePooling1D(),
keras.layers.Dense(1, activation='sigmoid')])
model.summary()
# compile
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# train
history = model.fit(train_batches,
epochs=20, #10
validation_data=test_batches,
validation_steps=30)
# evaluate model
loss, accuracy = model.evaluate(test_batches)
print("Loss: ", loss)
print("Accuracy: ", accuracy)
# visualize accuracy/loss over time
history_dict = history.history
history_dict.keys()
# loss,accuracy,validation loss and validation accuracy are all metrics monitored/recorded during training
import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
# training loss decreases with each epoch and accuracy increases ===> expected when using gradient-descent optimization
# validation loss and accuracy peak after about 10-20 epochs ---> example of overfitting
```
|
github_jupyter
|
#https://www.tensorflow.org/tutorials/keras/text_classification
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow import keras
!pip install -q tensorflow-datasets
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
import numpy as np
print(tf.__version__)
(train_data, test_data), info = tfds.load(
# Use the version pre-encoded with an ~8k vocabulary.
'imdb_reviews/subwords8k',
# Return the train/test datasets as a tuple.
split = (tfds.Split.TRAIN, tfds.Split.TEST),
# Return (example, label) pairs from the dataset (instead of a dictionary).
as_supervised=True,
# Also return the `info` structure.
with_info=True)
# dataset "info" includes the text encoder (a tfds.features.text.SubwordTextEncoder).
encoder = info.features['text'].encoder
print ('Vocabulary size: {}'.format(encoder.vocab_size))
# encoder can/will reversibly encode any string:
sample_string = 'Hello TensorFlow.'
encoded_string = encoder.encode(sample_string)
print ('Encoded string is {}'.format(encoded_string))
original_string = encoder.decode(encoded_string)
print ('The original string: "{}"'.format(original_string))
assert original_string == sample_string
# encoder encodes the string by breaking it into subwords/characters if the word is not in its dictionary
# thus: the more a string resembles the dataset the encoder was trained on, the shorter the encoded representation will be
for ts in encoded_string:
print ('{} ----> {}'.format(ts, encoder.decode([ts])))
# dataset is preprocessed - each example is array of int representing the words of the movie review content
for train_example, train_label in train_data.take(1):
print('Encoded text:', train_example[:10].numpy())
print('Label:', train_label.numpy())
# `info` structure contains the encoder/decoder ---> encoder can be used to recover the original text:
encoder.decode(train_example)
#prep data for training
# each batch will have a shape of (batch_size,sequence_length) due to dynamic padding length for each batch
# padded_batch is used to zero-pad the sequences while batching (due to the reviews being different lengths)
BUFFER_SIZE = 1000
train_batches = (
train_data
.shuffle(BUFFER_SIZE)
.padded_batch(32, train_data.output_shapes))
test_batches = (
test_data
.padded_batch(32, train_data.output_shapes))
for example_batch, label_batch in train_batches.take(2):
print("Batch shape:", example_batch.shape)
print("label shape:", label_batch.shape)
# build model
model = keras.Sequential([
keras.layers.Embedding(encoder.vocab_size, 16),
keras.layers.GlobalAveragePooling1D(),
keras.layers.Dense(1, activation='sigmoid')])
model.summary()
# compile
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# train
history = model.fit(train_batches,
epochs=20, #10
validation_data=test_batches,
validation_steps=30)
# evaluate model
loss, accuracy = model.evaluate(test_batches)
print("Loss: ", loss)
print("Accuracy: ", accuracy)
# visualize accuracy/loss over time
history_dict = history.history
history_dict.keys()
# loss,accuracy,validation loss and validation accuracy are all metrics monitored/recorded during training
import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
# training loss decreases with each epoch and accuracy increases ===> expected when using gradient-descent optimization
# validation loss and accuracy peak after about 10-20 epochs ---> example of overfitting
| 0.913561 | 0.860955 |
# COVID-19 DEATH TOLL PREDICTION
```
import pandas as pd
from fbprophet import Prophet
from sklearn.metrics import mean_absolute_error
import warnings; warnings.simplefilter('ignore')
import matplotlib.pyplot as plt
df = pd.read_csv(r'corona.csv')
df.head()
df= df[['DAY','DEAD_TODAY','DEAD_TOMORROW']].copy()
df.tail()
# Number of days
len(df.index)
# Average daily number of deaths
df["DEAD_TODAY"].mean()
df['ds'] = pd.to_datetime(df['DAY'], dayfirst = True)
df.plot(x='ds', figsize=(10, 5))
print(df['ds'])
```
# 1) PREPARING THE DATA FOR THE PROPHET MODEL:
```
# Prophet model: Prophet expects as input 'ds': the date and 'y': the numeric value we want to forecast
# in this project we use the daily death count as our y variable
newdf = df[['ds', 'DEAD_TODAY']].copy()
df.drop(['ds'], axis=1, inplace=True)
newdf.rename(columns={'DEAD_TODAY': 'y'}, inplace=True)
newdf.tail()
# The goal of this cell is to obtain the model's lower and upper bound values with the Prophet model
m = Prophet( ) # create the object m from the Prophet class
m.fit(newdf) # pass the newdf dataframe to the "fit" function, which trains the model on the data
horizon= 1
future = m.make_future_dataframe(periods=horizon)
forecast = m.predict(future)
forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()
fig1 = m.plot(forecast)
# in this part we compute the error of the prediction made by the Prophet model
MAE={}
MAE['Prophet'] = mean_absolute_error(newdf['y'], forecast[:-horizon]['yhat'])
print("MAE : {}".format(MAE))
#### Plot comparing the Prophet method's one-day-ahead prediction with the number of deaths actually recorded that day
comparison= pd.DataFrame()
comparison['ds']=newdf['ds'].copy()
comparison['DEAD_TOMORROW']=df['DEAD_TOMORROW'].copy()
comparison['Prediction_Prophet'] = forecast[:-1].yhat
comparison.plot(title="comparison",x='ds',figsize=(20, 6))
```
# 2) SIMPLE MOVING AVERAGE (SMA) ==> averaging method
```
window= 3
df['Prediction_ SMA_3'] = df.iloc[:,1].rolling(window=window).mean()
df.head()
rstd = df.iloc[:,2].rolling(window=window).std() # take the rolling standard deviation of column 2
bands = pd.DataFrame()
bands['DAY']= df['DAY'].copy()
bands['lower'] = df['Prediction_ SMA_3'] - 2 * rstd # the lower band is computed using the std deviation found from column 2
bands['upper'] = df['Prediction_ SMA_3'] + 2 * rstd # the upper band is computed using the std deviation found from column 2
bands = bands.join(df['DEAD_TOMORROW']).join(df['Prediction_ SMA_3'])
fig = plt.figure(figsize=(20, 6))
ax = bands.plot(title='Prediction_ SMA_3', figsize=(20, 6))
ax.fill_between(bands.index, bands['lower'], bands['upper'], color='#ADCCFF', alpha=0.4)
ax.set_xlabel('date')
ax.set_ylabel('DEAD TOMORROW')
ax.grid()
plt.show()
# the error rates of the SMA method and the Prophet method are compared and added to the "MAE" dict
# the SMA method's error rate turns out to be lower than the Prophet model's
MAE['SMA_3'] = mean_absolute_error(df[2:-1]['DEAD_TOMORROW'], df[2:-1]['Prediction_ SMA_3'])
print("MAE : {}".format(MAE))
# the SMA_3 model's output for each day is written as a series into the comparison dataframe that holds our predictions
comparison['Prediction_SMA_3'] = df['Prediction_ SMA_3']
print(comparison.tail())
comparison.plot(title="comparison",x='ds',figsize=(20, 6))
## As seen in the plot, the green line (the SMA curve) tracks the actual death counts (the blue line) closely
```
# 3)Exponential Moving Average (EMA)
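Before the code, a quick note on what `ewm(span=window, adjust=False)` computes: with `adjust=False`, pandas applies the recursion EMA_t = (1 - alpha) * EMA_(t-1) + alpha * x_t with alpha = 2 / (span + 1), so span=3 corresponds to alpha = 0.5. A small sketch with made-up numbers:
```
import pandas as pd

x = pd.Series([10.0, 20.0, 30.0, 40.0])
alpha = 2.0 / (3 + 1)   # span=3  ->  alpha = 0.5

# manual recursion, the same rule pandas applies with adjust=False
ema = [x.iloc[0]]
for v in x.iloc[1:]:
    ema.append((1 - alpha) * ema[-1] + alpha * v)

print(ema)
print(x.ewm(span=3, adjust=False).mean().tolist())   # matches the manual values
```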
```
# the Prediction_EMA_3 column values are computed based on column 1
df['Prediction_EMA_3'] = df.iloc[:,1].ewm(span=window,adjust=False).mean()
df.head()
# the standard deviation for EMA is computed and the mathematical operations are carried out
rstd = df.iloc[:,2].rolling(window=window).std()
bands = pd.DataFrame()
bands['DAY']= df['DAY'].copy()
bands['lower'] = df['Prediction_EMA_3'] - 2 * rstd
bands['upper'] = df['Prediction_EMA_3'] + 2 * rstd
bands = bands.join(df['DEAD_TOMORROW']).join(df['Prediction_EMA_3'])
fig = plt.figure(figsize=(20, 6))
ax = bands.plot(title='Prediction_EMA_3', figsize=(20, 6))
ax.fill_between(bands.index, bands['lower'], bands['upper'], color='#ADCCFF', alpha=0.4)
ax.set_xlabel('date')
ax.set_ylabel('DEAD TOMORROW')
ax.grid()
plt.show()
# the EMA method's error rate is compared with the other methods and added to the "MAE" dict
# the EMA method's error rate turns out to be lower than the Prophet model's
MAE['EMA_3'] = mean_absolute_error(df[1:-1]['DEAD_TOMORROW'], df[1:-1]['Prediction_EMA_3'])
print("MAE : {}".format(MAE))
# BUILD THE PLOT WITH THE 3 MODELS AND THE ACTUAL VALUES
comparison['Prediction_EMA_3'] = df['Prediction_EMA_3']
comparison.plot(title="comparison",x='ds',figsize=(20, 6))
print('Mean Absolute Errors (MAE): {}'.format(MAE))
rstd.tail()
# DAILY PREDICTIONS OF EMA_3 AND THE ACTUAL VALUES
bands.tail()
# ERROR RATE CHART OF THE 3 MODELS
errorsDF = pd.DataFrame(MAE, index=['MAE'])
ax = errorsDF.plot.bar(rot=0, figsize=(10, 7))
```
|
github_jupyter
|
import pandas as pd
from fbprophet import Prophet
from sklearn.metrics import mean_absolute_error
import warnings; warnings.simplefilter('ignore')
import matplotlib.pyplot as plt
df = pd.read_csv(r'corona.csv')
df.head()
df= df[['DAY','DEAD_TODAY','DEAD_TOMORROW']].copy()
df.tail()
# Number of days
len(df.index)
# Average daily number of deaths
df["DEAD_TODAY"].mean()
df['ds'] = pd.to_datetime(df['DAY'], dayfirst = True)
df.plot(x='ds', figsize=(10, 5))
print(df['ds'])
# Prophet model: Prophet expects as input 'ds': the date and 'y': the numeric value we want to forecast
# in this project we use the daily death count as our y variable
newdf = df[['ds', 'DEAD_TODAY']].copy()
df.drop(['ds'], axis=1, inplace=True)
newdf.rename(columns={'DEAD_TODAY': 'y'}, inplace=True)
newdf.tail()
# The goal of this cell is to obtain the model's lower and upper bound values with the Prophet model
m = Prophet( ) # create the object m from the Prophet class
m.fit(newdf) # pass the newdf dataframe to the "fit" function, which trains the model on the data
horizon= 1
future = m.make_future_dataframe(periods=horizon)
forecast = m.predict(future)
forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()
fig1 = m.plot(forecast)
# in this part we compute the error of the prediction made by the Prophet model
MAE={}
MAE['Prophet'] = mean_absolute_error(newdf['y'], forecast[:-horizon]['yhat'])
print("MAE : {}".format(MAE))
#### Plot comparing the Prophet method's one-day-ahead prediction with the number of deaths actually recorded that day
comparison= pd.DataFrame()
comparison['ds']=newdf['ds'].copy()
comparison['DEAD_TOMORROW']=df['DEAD_TOMORROW'].copy()
comparison['Prediction_Prophet'] = forecast[:-1].yhat
comparison.plot(title="comparison",x='ds',figsize=(20, 6))
window= 3
df['Prediction_ SMA_3'] = df.iloc[:,1].rolling(window=window).mean()
df.head()
rstd = df.iloc[:,2].rolling(window=window).std() # take the rolling standard deviation of column 2
bands = pd.DataFrame()
bands['DAY']= df['DAY'].copy()
bands['lower'] = df['Prediction_ SMA_3'] - 2 * rstd # the lower band is computed using the std deviation found from column 2
bands['upper'] = df['Prediction_ SMA_3'] + 2 * rstd # the upper band is computed using the std deviation found from column 2
bands = bands.join(df['DEAD_TOMORROW']).join(df['Prediction_ SMA_3'])
fig = plt.figure(figsize=(20, 6))
ax = bands.plot(title='Prediction_ SMA_3', figsize=(20, 6))
ax.fill_between(bands.index, bands['lower'], bands['upper'], color='#ADCCFF', alpha=0.4)
ax.set_xlabel('date')
ax.set_ylabel('DEAD TOMORROW')
ax.grid()
plt.show()
# the error rates of the SMA method and the Prophet method are compared and added to the "MAE" dict
# the SMA method's error rate turns out to be lower than the Prophet model's
MAE['SMA_3'] = mean_absolute_error(df[2:-1]['DEAD_TOMORROW'], df[2:-1]['Prediction_ SMA_3'])
print("MAE : {}".format(MAE))
# the SMA_3 model's output for each day is written as a series into the comparison dataframe that holds our predictions
comparison['Prediction_SMA_3'] = df['Prediction_ SMA_3']
print(comparison.tail())
comparison.plot(title="comparison",x='ds',figsize=(20, 6))
## As seen in the plot, the green line (the SMA curve) tracks the actual death counts (the blue line) closely
# the Prediction_EMA_3 column values are computed based on column 1
df['Prediction_EMA_3'] = df.iloc[:,1].ewm(span=window,adjust=False).mean()
df.head()
# the standard deviation for EMA is computed and the mathematical operations are carried out
rstd = df.iloc[:,2].rolling(window=window).std()
bands = pd.DataFrame()
bands['DAY']= df['DAY'].copy()
bands['lower'] = df['Prediction_EMA_3'] - 2 * rstd
bands['upper'] = df['Prediction_EMA_3'] + 2 * rstd
bands = bands.join(df['DEAD_TOMORROW']).join(df['Prediction_EMA_3'])
fig = plt.figure(figsize=(20, 6))
ax = bands.plot(title='Prediction_EMA_3', figsize=(20, 6))
ax.fill_between(bands.index, bands['lower'], bands['upper'], color='#ADCCFF', alpha=0.4)
ax.set_xlabel('date')
ax.set_ylabel('DEAD TOMORROW')
ax.grid()
plt.show()
# the EMA method's error rate is compared with the other methods and added to the "MAE" dict
# the EMA method's error rate turns out to be lower than the Prophet model's
MAE['EMA_3'] = mean_absolute_error(df[1:-1]['DEAD_TOMORROW'], df[1:-1]['Prediction_EMA_3'])
print("MAE : {}".format(MAE))
# BUILD THE PLOT WITH THE 3 MODELS AND THE ACTUAL VALUES
comparison['Prediction_EMA_3'] = df['Prediction_EMA_3']
comparison.plot(title="comparison",x='ds',figsize=(20, 6))
print('Mean Absolute Errors (MAE): {}'.format(MAE))
rstd.tail()
# DAILY PREDICTIONS OF EMA_3 AND THE ACTUAL VALUES
bands.tail()
# ERROR RATE CHART OF THE 3 MODELS
errorsDF = pd.DataFrame(MAE, index=['MAE'])
ax = errorsDF.plot.bar(rot=0, figsize=(10, 7))
| 0.253399 | 0.748076 |
```
import sys
sys.path.append('../../pyutils')
import numpy as np
import scipy.linalg
import metrics
np.random.seed(12)
```
# LU Decomposition
Let $A$ be a matrix of size $n * n$. Some (not all) non-singular (invertible) matrices can be decomposed with the LU decomposition:
$A = LU$ with $L$ a lower unit triangular matrix (1s on the diagonal) and $U$ an upper triangular matrix.
```
def lu(a):
n = a.shape[0]
l = np.eye(n)
u = a.copy()
for j in range(n):
pivot = u[j, j] #might be null, decomposition fails if no perms
for i in range(j+1, n):
#coeff is selected to put 0 in u[i, j]
coeff = - u[i, j] / pivot
l[i, j] = - coeff
u[i] += coeff * u[j]
return l, u
A = np.random.randn(4, 4)
l, u = lu(A)
print(metrics.is_ltri(l))
print(metrics.is_utri(u))
print(metrics.tdist(A, l @ u))
print(l)
print(u)
```
## Solve systems of equations
Let $A$ be a matrix of size $n * n$, and let $b$ and $x$ be vectors of size $n$.
Find $x$ such that $Ax = b$.
Let $A = LU$.
$$Ax = b$$
$$LUx = b$$
Let $c = Ux$.
Solve $Lc = b$ with forward substitution.
Solve $Ux = c$ with backward substitution.
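The code below delegates both triangular solves to `scipy.linalg.solve_triangular`. For reference, a hand-rolled version of the two substitutions for a vector right-hand side could look like this (a sketch only, not used by the rest of the notebook):
```
def forward_sub(L, b):
    # solve L c = b with L lower triangular, by forward substitution
    n = L.shape[0]
    c = np.zeros(n)
    for i in range(n):
        c[i] = (b[i] - L[i, :i] @ c[:i]) / L[i, i]
    return c

def backward_sub(U, c):
    # solve U x = c with U upper triangular, by backward substitution
    n = U.shape[0]
    x = np.zeros(n)
    for i in reversed(range(n)):
        x[i] = (c[i] - U[i, i+1:] @ x[i+1:]) / U[i, i]
    return x
```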
```
def solve_lu(A, b):
L, U = lu(A)
c = scipy.linalg.solve_triangular(L, b, lower=True)
x = scipy.linalg.solve_triangular(U, c)
return x
A = np.random.randn(4, 4)
b = np.random.randn(4)
x = solve_lu(A, b)
x_sol = scipy.linalg.solve(A, b)
print(x)
print(x_sol)
print(metrics.tdist(x, x_sol))
print(metrics.tdist(A @ x, b))
```
The same algorithm also works to solve matrix systems.
Solve $AX = B$ with $A$ a matrix of size $n * n$, $B$ a matrix of size $n * p$, and $X$ a matrix of size $n * p$.
```
A = np.random.randn(4, 4)
b = np.random.randn(4, 3)
x = solve_lu(A, b)
x_sol = scipy.linalg.solve(A, b)
print(x)
print(x_sol)
print(metrics.tdist(x, x_sol))
print(metrics.tdist(A @ x, b))
```
It's possible to find the inverse of a matrix by solving $AX = B$ for $B = I$.
$X = A^{-1}$
```
def inv_lu(A):
return solve_lu(A, np.eye(A.shape[0]))
A = np.random.randn(4, 4)
Ai = inv_lu(A)
Ai_sol = np.linalg.inv(A)
print(Ai)
print(Ai_sol)
print(metrics.tdist(Ai, Ai_sol))
print(metrics.tdist(A @ Ai, np.eye(4)))
print(metrics.tdist(Ai @ A, np.eye(4)))
```
## PLU Decomposition
Any non-singular matrix $A$ of size $n * n$ can be decomposed as:
$$PA = LU$$
$L$ is a lower unit triangular matrix (1s on the diagonal), $U$ an upper triangular matrix, and $P$ a permutation matrix.
$P$ is used to exchange rows of $A$: it removes zero pivots, which would make a plain $LU$ decomposition impossible, and it selects the largest available pivot in each column, which makes the factorization more numerically stable.
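A tiny illustration of why the pivoting matters (a sketch using the `lu` defined above and the `plu` defined in the next cell, so run it after that cell): plain `lu` hits a zero pivot on this matrix and produces inf/nan values, while the pivoted version recovers it exactly.
```
A0 = np.array([[0., 1.],
               [1., 0.]])

# lu(A0) would divide by the zero pivot A0[0, 0]
p, l, u = plu(A0)        # find_p swaps the rows first
print(p @ A0 - l @ u)    # ~ zero matrix
```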
```
def find_p(a):
n = a.shape[0]
p = np.eye(n)
for j in range(n):
#get index of biggest abs element in column j (starting at line j)
pivot = j + np.argmax(np.abs(a[j:, j]))
if pivot != j: #swap both lines
p[[j, pivot]] = p[[pivot, j]]
return p
def plu(a):
p = find_p(a)
a2 = p @ a
l, u = lu(a2)
return p, l, u
A = np.random.randn(4, 4)
p, l, u = plu(A)
print(metrics.is_ltri(l))
print(metrics.is_utri(u))
print(metrics.tdist(p @ A, l @ u))
print(p)
print(l)
print(u)
```
$PLU$ decomposition can be used to solve $Ax=b$
Let $PA = LU$.
$$Ax = b$$
$$PAx = Pb$$
$$LUx = Pb$$
Let $c = Ux$.
Solve $Lc = Pb$ with forward substitution.
Solve $Ux = c$ with backward substitution.
Similar techniques can be used to solve matrix systems, and to find the inverse of any non-singular matrix.
```
def solve_plu(A, b):
P, L, U = plu(A)
c = scipy.linalg.solve_triangular(L, P @ b, lower=True)
x = scipy.linalg.solve_triangular(U, c)
return x
A = np.random.randn(4, 4)
b = np.random.randn(4, 3)
x = solve_plu(A, b)
x_sol = scipy.linalg.solve(A, b)
print(x)
print(x_sol)
print(metrics.tdist(x, x_sol))
print(metrics.tdist(A @ x, b))
def inv_plu(A):
return solve_plu(A, np.eye(A.shape[0]))
A = np.random.randn(4, 4)
Ai = inv_plu(A)
Ai_sol = np.linalg.inv(A)
print(Ai)
print(Ai_sol)
print(metrics.tdist(Ai, Ai_sol))
print(metrics.tdist(A @ Ai, np.eye(4)))
print(metrics.tdist(Ai @ A, np.eye(4)))
```
|
github_jupyter
|
import sys
sys.path.append('../../pyutils')
import numpy as np
import scipy.linalg
import metrics
np.random.seed(12)
def lu(a):
n = a.shape[0]
l = np.eye(n)
u = a.copy()
for j in range(n):
pivot = u[j, j] #might be null, decomposition fails if no perms
for i in range(j+1, n):
#coeff is selected to put 0 in u[i, j]
coeff = - u[i, j] / pivot
l[i, j] = - coeff
u[i] += coeff * u[j]
return l, u
A = np.random.randn(4, 4)
l, u = lu(A)
print(metrics.is_ltri(l))
print(metrics.is_utri(u))
print(metrics.tdist(A, l @ u))
print(l)
print(u)
def solve_lu(A, b):
L, U = lu(A)
c = scipy.linalg.solve_triangular(L, b, lower=True)
x = scipy.linalg.solve_triangular(U, c)
return x
A = np.random.randn(4, 4)
b = np.random.randn(4)
x = solve_lu(A, b)
x_sol = scipy.linalg.solve(A, b)
print(x)
print(x_sol)
print(metrics.tdist(x, x_sol))
print(metrics.tdist(A @ x, b))
A = np.random.randn(4, 4)
b = np.random.randn(4, 3)
x = solve_lu(A, b)
x_sol = scipy.linalg.solve(A, b)
print(x)
print(x_sol)
print(metrics.tdist(x, x_sol))
print(metrics.tdist(A @ x, b))
def inv_lu(A):
return solve_lu(A, np.eye(A.shape[0]))
A = np.random.randn(4, 4)
Ai = inv_lu(A)
Ai_sol = np.linalg.inv(A)
print(Ai)
print(Ai_sol)
print(metrics.tdist(Ai, Ai_sol))
print(metrics.tdist(A @ Ai, np.eye(4)))
print(metrics.tdist(Ai @ A, np.eye(4)))
def find_p(a):
n = a.shape[0]
p = np.eye(n)
for j in range(n):
#get index of biggest abs element in column j (starting at line j)
pivot = j + np.argmax(np.abs(a[j:, j]))
if pivot != j: #swap both lines
p[[j, pivot]] = p[[pivot, j]]
return p
def plu(a):
p = find_p(a)
a2 = p @ a
l, u = lu(a2)
return p, l, u
A = np.random.randn(4, 4)
p, l, u = plu(A)
print(metrics.is_ltri(l))
print(metrics.is_utri(u))
print(metrics.tdist(p @ A, l @ u))
print(p)
print(l)
print(u)
def solve_plu(A, b):
P, L, U = plu(A)
c = scipy.linalg.solve_triangular(L, P @ b, lower=True)
x = scipy.linalg.solve_triangular(U, c)
return x
A = np.random.randn(4, 4)
b = np.random.randn(4, 3)
x = solve_plu(A, b)
x_sol = scipy.linalg.solve(A, b)
print(x)
print(x_sol)
print(metrics.tdist(x, x_sol))
print(metrics.tdist(A @ x, b))
def inv_plu(A):
return solve_plu(A, np.eye(A.shape[0]))
A = np.random.randn(4, 4)
Ai = inv_plu(A)
Ai_sol = np.linalg.inv(A)
print(Ai)
print(Ai_sol)
print(metrics.tdist(Ai, Ai_sol))
print(metrics.tdist(A @ Ai, np.eye(4)))
print(metrics.tdist(Ai @ A, np.eye(4)))
| 0.190875 | 0.891622 |
```
!jupyter notebook list
```
## Setup
```
%load_ext autoreload
%autoreload 2
import os
import sys
import cv2
import math
import numpy as np
import argparse
sys.path.append('../pyslam/')
from config import Config
from visual_odometry import VisualOdometry
from camera import PinholeCamera
from ground_truth import groundtruth_factory
from dataset import dataset_factory
import matplotlib.pyplot as plt
from glob import glob
from feature_tracker import feature_tracker_factory, FeatureTrackerTypes
from feature_manager import feature_manager_factory
from feature_types import FeatureDetectorTypes, FeatureDescriptorTypes, FeatureInfo
from feature_matcher import feature_matcher_factory, FeatureMatcherTypes
from tqdm import tqdm
from feature_tracker_configs import FeatureTrackerConfigs
# parser = argparse.ArgumentParser(description='Run VO')
# parser.add_argument('--n', type=str, default='default', help='experiment name')
# args = parser.parse_args()
# exp_name = args.n
model_config = FeatureTrackerConfigs.test_configs
folders = os.listdir('../data/dataset/sequences/')
folders.sort()
folders
exp_name = 'TXX_SIFT_ORB2'
print('Experiment: ', exp_name)
for f in folders:
print('Folder: ',f)
config = Config(f)
dataset = dataset_factory(config.dataset_settings)
groundtruth = groundtruth_factory(config.dataset_settings)
cam = PinholeCamera(config.cam_settings['Camera.width'], config.cam_settings['Camera.height'],
config.cam_settings['Camera.fx'], config.cam_settings['Camera.fy'],
config.cam_settings['Camera.cx'], config.cam_settings['Camera.cy'],
config.DistCoef, config.cam_settings['Camera.fps'])
num_features=2000 # how many features do you want to detect and track?
# select your tracker configuration (see the file feature_tracker_configs.py)
# LK_SHI_TOMASI, LK_FAST
# SHI_TOMASI_ORB, FAST_ORB, ORB, BRISK, AKAZE, FAST_FREAK, SIFT, ROOT_SIFT, SURF, SUPERPOINT, FAST_TFEAT
tracker_config = model_config[exp_name]
tracker_config['num_features'] = num_features
feature_tracker = feature_tracker_factory(**tracker_config)
# create visual odometry object
vo = VisualOdometry(cam, groundtruth, feature_tracker)
# todo: add the trajectory visualization
traj_img_size = 800
traj_img = np.zeros((traj_img_size, traj_img_size, 3), dtype=np.uint8)
half_traj_img_size = int(0.5*traj_img_size)
draw_scale = 1
# second loop: iterate over all the frames
result = []
for img_id in tqdm(range(dataset.max_frame_id)):
img = dataset.getImage(img_id)
if img is not None:
vo.track(img, img_id)
tmp = np.reshape(np.hstack((vo.cur_R, vo.cur_t)), 12)
result.append(' '.join([str(i) for i in tmp]))
# Save the results in the text files
res_base_path = os.path.join('../data/results/', exp_name)
res_folder_path = os.path.join(res_base_path, f+'.txt')
os.makedirs(res_base_path, exist_ok=True)
txt_file=open(res_folder_path, 'a')
txt_file.writelines("%s\n" % i for i in result)
txt_file.close()
print('Finished till:', exp_name)
import cv2
surf = cv2.xfeatures2d.SURF_create()
```
|
github_jupyter
|
!jupyter notebook list
%load_ext autoreload
%autoreload 2
import os
import sys
import cv2
import math
import numpy as np
import argparse
sys.path.append('../pyslam/')
from config import Config
from visual_odometry import VisualOdometry
from camera import PinholeCamera
from ground_truth import groundtruth_factory
from dataset import dataset_factory
import matplotlib.pyplot as plt
from glob import glob
from feature_tracker import feature_tracker_factory, FeatureTrackerTypes
from feature_manager import feature_manager_factory
from feature_types import FeatureDetectorTypes, FeatureDescriptorTypes, FeatureInfo
from feature_matcher import feature_matcher_factory, FeatureMatcherTypes
from tqdm import tqdm
from feature_tracker_configs import FeatureTrackerConfigs
# parser = argparse.ArgumentParser(description='Run VO')
# parser.add_argument('--n', type=str, default='default', help='experiment name')
# args = parser.parse_args()
# exp_name = args.n
model_config = FeatureTrackerConfigs.test_configs
folders = os.listdir('../data/dataset/sequences/')
folders.sort()
folders
exp_name = 'TXX_SIFT_ORB2'
print('Experiment: ', exp_name)
for f in folders:
print('Folder: ',f)
config = Config(f)
dataset = dataset_factory(config.dataset_settings)
groundtruth = groundtruth_factory(config.dataset_settings)
cam = PinholeCamera(config.cam_settings['Camera.width'], config.cam_settings['Camera.height'],
config.cam_settings['Camera.fx'], config.cam_settings['Camera.fy'],
config.cam_settings['Camera.cx'], config.cam_settings['Camera.cy'],
config.DistCoef, config.cam_settings['Camera.fps'])
num_features=2000 # how many features do you want to detect and track?
# select your tracker configuration (see the file feature_tracker_configs.py)
# LK_SHI_TOMASI, LK_FAST
# SHI_TOMASI_ORB, FAST_ORB, ORB, BRISK, AKAZE, FAST_FREAK, SIFT, ROOT_SIFT, SURF, SUPERPOINT, FAST_TFEAT
tracker_config = model_config[exp_name]
tracker_config['num_features'] = num_features
feature_tracker = feature_tracker_factory(**tracker_config)
# create visual odometry object
vo = VisualOdometry(cam, groundtruth, feature_tracker)
# todo: add the trajectory visualization
traj_img_size = 800
traj_img = np.zeros((traj_img_size, traj_img_size, 3), dtype=np.uint8)
half_traj_img_size = int(0.5*traj_img_size)
draw_scale = 1
# second loop: iterate over all the frames
result = []
for img_id in tqdm(range(dataset.max_frame_id)):
img = dataset.getImage(img_id)
if img is not None:
vo.track(img, img_id)
tmp = np.reshape(np.hstack((vo.cur_R, vo.cur_t)), 12)
result.append(' '.join([str(i) for i in tmp]))
# Save the results in the text files
res_base_path = os.path.join('../data/results/', exp_name)
res_folder_path = os.path.join(res_base_path, f+'.txt')
os.makedirs(res_base_path, exist_ok=True)
txt_file=open(res_folder_path, 'a')
txt_file.writelines("%s\n" % i for i in result)
txt_file.close()
print('Finished till:', exp_name)
import cv2
surf = cv2.xfeatures2d.SURF_create()
| 0.269518 | 0.448426 |
```
import pyEX
# c = pyEX.Client()
c = pyEX.Client(version='sandbox')
```
# Symbols data
```
c.symbolsDF().head()
```
## Symbols
```
c.symbolsDF().head()
c.symbols()[:5]
c.symbolsList()[:10]
#c.symbolsList()
```
## IEX Symbols
```
c.iexSymbols()[:5]
c.iexSymbolsDF().head()
c.iexSymbolsList()[:10]
```
## International Symbols
```
c.internationalSymbols()[:5]
c.internationalSymbolsDF("CA").head()
symbols=c.internationalSymbolsDF("CA")
#symbols.keys()
#symbols.loc["AAN-CV"]
len(symbols.index)
c.internationalSymbolsList()[:5]
```
## FX Symbols
```
c.fxSymbols()
dfs = c.fxSymbolsDF()
dfs[0].head()
dfs[1].head()
l = c.fxSymbolsList()
l[0][:5]
l[1][:5]
```
## Options Symbols
```
d = c.optionsSymbols()
list(d.keys())[:5]
c.optionsSymbolsDF().head()
c.optionsSymbolsList()[:5]
```
## International Exchanges
```
c.internationalExchanges()[:5]
c.internationalExchangesDF().head()
```
## US Exchanges
```
c.exchanges()[:5]
c.exchangesDF().head()
```
## US Holidays
```
c.holidays()[:5]
c.holidaysDF().head()
```
## Sectors
```
c.sectors()[:5]
c.sectorsDF().head()
```
## Tags
```
c.tags()[:5]
c.tagsDF().head()
```
# Data Points API
```
c.points()[:5]
c.pointsDF().head()
c.points(key='DGS10')
c.pointsDF('aapl').head()
c.points('aapl', 'QUOTE-LATESTPRICE')
```
# Stock API
```
symbol = 'AAPL'
```
## Advanced Stats
```
c.advancedStats(symbol)
c.advancedStatsDF(symbol)
```
## Balance Sheet
```
c.balanceSheet(symbol)
c.balanceSheetDF(symbol)
```
## Book
```
c.book(symbol)
c.bookDF(symbol).head()
```
## Cashflow
```
c.cashFlow(symbol)
c.cashFlowDF(symbol)
```
## Collections
```
c.collections?
c.collectionsDF?
```
## Company
```
c.company(symbol)
c.companyDF(symbol)
```
## Delayed Quote
```
c.delayedQuote(symbol)
c.delayedQuoteDF(symbol)
```
## Dividends
```
c.dividends(symbol)
c.dividendsDF(symbol)
```
## Earnings
```
c.earnings(symbol)
c.earningsDF(symbol)
```
## Earnings Today
```
c.earningsToday()
c.earningsTodayDF().head()
```
## Effective Spread
```
c.spread(symbol)
c.spreadDF(symbol)
```
## Estimates
```
c.estimates(symbol)
c.estimatesDF(symbol)
```
## Financials
```
c.financials(symbol)
c.financialsDF(symbol)
```
## Fund Ownership
```
c.fundOwnership(symbol)
c.fundOwnershipDF(symbol)
```
## Historical Prices
```
c.chart(symbol)
c.chartDF(symbol)
```
## Income Statement
```
c.incomeStatement(symbol)
c.incomeStatementDF(symbol)
```
## Insider Roster
```
c.insiderRoster(symbol)
c.insiderRosterDF(symbol)
```
## Insider Summary
```
c.insiderSummary(symbol)
c.insiderSummaryDF(symbol)
```
## Insider Transactions
```
c.insiderTransactions(symbol)[:5]
c.insiderTransactionsDF(symbol)
```
## Institutional Ownership
```
c.institutionalOwnership(symbol)
c.institutionalOwnershipDF(symbol)
```
## Intraday Prices
```
c.intraday(symbol)[:5]
c.intradayDF(symbol).head()
```
## IPO Calendar
```
c.ipoToday()
c.ipoTodayDF()
c.ipoUpcoming()
c.ipoUpcomingDF()
```
## Key Stats
```
c.keyStats(symbol)
c.keyStatsDF(symbol)
```
## Largest Trades
```
c.largestTrades(symbol)
c.largestTradesDF(symbol)
```
## List
```
c.list()[:5]
c.listDF().head()
```
## Logo
```
c.logo(symbol)
c.logoPNG(symbol) # Not available for sandbox
c.logoNotebook(symbol) # Not available for sandbox
```
## Market Volume
```
c.marketVolume()[:5]
c.marketVolumeDF()
```
## News
```
c.news(symbol)[:5]
c.newsDF(symbol).head()
c.marketNews()[:5]
c.marketNewsDF().head()
```
## OHLC
```
c.ohlc(symbol)
c.ohlcDF(symbol)
```
## Open/Close Price
```
c.ohlc(symbol)
c.ohlcDF(symbol)
```
## Options
```
exps = c.optionExpirations(symbol)
exps
c.options(symbol, exps[0])[:5]
c.optionsDF(symbol, exps[0]).head()
```
## Peers
```
c.peers(symbol)
c.peersDF(symbol)
```
## Previous Day Price
```
c.yesterday(symbol)
c.yesterdayDF(symbol)
c.marketYesterday()['A']
c.marketYesterdayDF().head()
```
## Price
```
c.price(symbol)
c.priceDF(symbol)
```
## Price Target
```
c.priceTarget(symbol)
c.priceTargetDF(symbol)
```
## Quote
```
c.quote(symbol)
c.quoteDF(symbol)
```
# Recommendation Trends
# Sector Performance
```
c.sectorPerformance()
c.sectorPerformanceDF()
```
## Splits
```
c.splits(symbol)
c.splitsDF(symbol)
```
## Upcoming Events
## Volume By Venue
```
c.volumeByVenue(symbol)
c.volumeByVenueDF(symbol)
```
# Alternative
## crypto
```
# c.crypto()
# c.cryptoDF()
```
## sentiment
```
c.sentiment(symbol)
c.sentimentDF(symbol)
```
## CEO Compensation
```
c.ceoCompensation(symbol)
c.ceoCompensationDF(symbol)
```
# Forex
## Exchange Rates
# IEX Data
## TOPS
## Last
## DEEP
## DEEP Auction
## DEEP Book
## DEEP Operational Halt Status
## DEEP Official Price
## DEEP Security Event
## DEEP Short Sale Price Test Status
## DEEP System Event
## DEEP Trades
## DEEP Trade Break
## DEEP Trading Status
## Listed Regulation SHO Threshold Securities List In Dev
## Listed Short Interest List In Dev
## Stats Historical Daily In Dev
## Stats Historical Summary
## Stats Intraday
## Stats Recent
## Stats Records
# API Metadata
|
github_jupyter
|
import pyEX
# c = pyEX.Client()
c = pyEX.Client(version='sandbox')
c.symbolsDF().head()
c.symbolsDF().head()
c.symbols()[:5]
c.symbolsList()[:10]
#c.symbolsList()
c.iexSymbols()[:5]
c.iexSymbolsDF().head()
c.iexSymbolsList()[:10]
c.internationalSymbols()[:5]
c.internationalSymbolsDF("CA").head()
symbols=c.internationalSymbolsDF("CA")
#symbols.keys()
#symbols.loc["AAN-CV"]
len(symbols.index)
c.internationalSymbolsList()[:5]
c.fxSymbols()
dfs = c.fxSymbolsDF()
dfs[0].head()
dfs[1].head()
l = c.fxSymbolsList()
l[0][:5]
l[1][:5]
d = c.optionsSymbols()
list(d.keys())[:5]
c.optionsSymbolsDF().head()
c.optionsSymbolsList()[:5]
c.internationalExchanges()[:5]
c.internationalExchangesDF().head()
c.exchanges()[:5]
c.exchangesDF().head()
c.holidays()[:5]
c.holidaysDF().head()
c.sectors()[:5]
c.sectorsDF().head()
c.tags()[:5]
c.tagsDF().head()
c.points()[:5]
c.pointsDF().head()
c.points(key='DGS10')
c.pointsDF('aapl').head()
c.points('aapl', 'QUOTE-LATESTPRICE')
symbol = 'AAPL'
c.advancedStats(symbol)
c.advancedStatsDF(symbol)
c.balanceSheet(symbol)
c.balanceSheetDF(symbol)
c.book(symbol)
c.bookDF(symbol).head()
c.cashFlow(symbol)
c.cashFlowDF(symbol)
c.collections?
c.collectionsDF?
c.company(symbol)
c.companyDF(symbol)
c.delayedQuote(symbol)
c.delayedQuoteDF(symbol)
c.dividends(symbol)
c.dividendsDF(symbol)
c.earnings(symbol)
c.earningsDF(symbol)
c.earningsToday()
c.earningsTodayDF().head()
c.spread(symbol)
c.spreadDF(symbol)
c.estimates(symbol)
c.estimatesDF(symbol)
c.financials(symbol)
c.financialsDF(symbol)
c.fundOwnership(symbol)
c.fundOwnershipDF(symbol)
c.chart(symbol)
c.chartDF(symbol)
c.incomeStatement(symbol)
c.incomeStatementDF(symbol)
c.insiderRoster(symbol)
c.insiderRosterDF(symbol)
c.insiderSummary(symbol)
c.insiderSummaryDF(symbol)
c.insiderTransactions(symbol)[:5]
c.insiderTransactionsDF(symbol)
c.institutionalOwnership(symbol)
c.institutionalOwnershipDF(symbol)
c.intraday(symbol)[:5]
c.intradayDF(symbol).head()
c.ipoToday()
c.ipoTodayDF()
c.ipoUpcoming()
c.ipoUpcomingDF()
c.keyStats(symbol)
c.keyStatsDF(symbol)
c.largestTrades(symbol)
c.largestTradesDF(symbol)
c.list()[:5]
c.listDF().head()
c.logo(symbol)
c.logoPNG(symbol) # Not available for sandbox
c.logoNotebook(symbol) # Not available for sandbox
c.marketVolume()[:5]
c.marketVolumeDF()
c.news(symbol)[:5]
c.newsDF(symbol).head()
c.marketNews()[:5]
c.marketNewsDF().head()
c.ohlc(symbol)
c.ohlcDF(symbol)
c.ohlc(symbol)
c.ohlcDF(symbol)
exps = c.optionExpirations(symbol)
exps
c.options(symbol, exps[0])[:5]
c.optionsDF(symbol, exps[0]).head()
c.peers(symbol)
c.peersDF(symbol)
c.yesterday(symbol)
c.yesterdayDF(symbol)
c.marketYesterday()['A']
c.marketYesterdayDF().head()
c.price(symbol)
c.priceDF(symbol)
c.priceTarget(symbol)
c.priceTargetDF(symbol)
c.quote(symbol)
c.quoteDF(symbol)
c.sectorPerformance()
c.sectorPerformanceDF()
c.splits(symbol)
c.splitsDF(symbol)
c.volumeByVenue(symbol)
c.volumeByVenueDF(symbol)
# c.crypto()
# c.cryptoDF()
c.sentiment(symbol)
c.sentimentDF(symbol)
c.ceoCompensation(symbol)
c.ceoCompensationDF(symbol)
| 0.279435 | 0.811863 |
```
class State(object):
def __init__(self, elevator, pairs):
self.elevator = elevator
self.unrolled = list(pairs)
self.pairs = list(zip(pairs[::2], pairs[1::2]))
@property
def islegal(self):
"""Return true if the state is not frying any microchips."""
# Check for unprotected Micro chips, and for each
# check if any other generator is on the same floor.
for i, (G, M) in enumerate(self.pairs):
if G != M:
for j, (G2, _) in enumerate(self.pairs):
if G2 == M:
return False
# At this point, return true if all values are legal floors
minimum = min(self.elevator, min(self.unrolled))
maximum = max(self.elevator, max(self.unrolled))
return 1 <= minimum and maximum <= 4
def moves(self):
for shift in -1, 1:
new_floor = self.elevator + shift
if 1 <= new_floor <= 4:
for i, v1 in enumerate(self.unrolled):
if v1 == self.elevator:
for j, v2 in enumerate(self.unrolled[i:], start=i):
if v2 == self.elevator:
new = self.unrolled[:]
new[i] += shift
if i != j:
new[j] += shift
new_state = State(new_floor, new)
if new_state.islegal:
yield new_state
def __eq__(self, other):
return repr(self) == repr(other)
def __lt__(self, other):
return repr(self) < repr(other)
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return repr(self.elevator) + ' ' + ' '.join(''.join(repr(a) for a in pair) for pair in self.pairs)
state = State(1, [2,1,3,1])
print(state.islegal)
print(state)
print(list(state.moves()))
print(state)
import random
def zobrist(n_types=5, n_floors=4):
    # NOTE: this function was left unfinished in the original notebook and is never
    # called below (the BFS hashes states via their repr instead). It is completed
    # here as a sketch, assuming the intent was a Zobrist-style hash that treats
    # generator/microchip pairs as interchangeable (symmetry reduction).
    maxint = 99999999999999999999999999
    floor_hash = {i: random.randint(0, maxint) for i in range(1, n_floors+1)}
    # random.random() takes no arguments; randint was presumably meant here
    # (these per-type hashes end up unused in the symmetry-reduced version)
    ischip_hash, isgen_hash = (random.randint(0, maxint) for _ in range(2))
    chiptype_hash = {i: random.randint(0, maxint) for i in range(n_types)}
    gentype_hash = {i: random.randint(0, maxint) for i in range(n_types)}
    def shash(state):
        code = 0
        gens_at_floor = {i: set() for i in range(1, n_floors+1)}
        chips_at_floor = {i: set() for i in range(1, n_floors+1)}
        for i, f in enumerate(state.unrolled):
            if i % 2 == 0:
                gens_at_floor[f].add(i // 2)   # store the pair index so matching works
            else:
                chips_at_floor[f].add(i // 2)
        matched = {i: gens_at_floor[i].intersection(chips_at_floor[i]) for i in range(1, n_floors+1)}
        gens_at_floor = {i: gens_at_floor[i] - matched[i] for i in range(1, n_floors+1)}
        chips_at_floor = {i: chips_at_floor[i] - matched[i] for i in range(1, n_floors+1)}
        for f in range(1, n_floors+1):
            # hash only per-floor counts, so equivalent (pair-permuted) states collide on purpose
            code ^= floor_hash[f] * (len(matched[f]) * 10007 + len(gens_at_floor[f]) * 101 + len(chips_at_floor[f]))
        code ^= floor_hash[state.elevator]
        return code
    return shash
from collections import deque
def bfs(start_state, target):
visited = set()
start_state.dist = 0
queue = deque([start_state])
curr_depth = 0
while queue:
current = queue.popleft()
visited.add(current)
if current == target:
return current
if current.dist > curr_depth:
curr_depth = current.dist
print('At depth', current.dist)
for neighbor in current.moves():
if neighbor not in visited:
neighbor.dist = current.dist + 1
neighbor.parent = current
queue.append(neighbor)
return start_state
target_state = State(1, [2,1,3,1])
start_state = State(4, [4,4,4,4])
dest = bfs(start_state, target_state)
print('Minimum elevator rides to target:', dest.dist)
target_state = State(1, [1,1,2,3,2,3,2,3,2,3])
start_state = State(4, [4,4,4,4,4,4,4,4,4,4])
dest = bfs(start_state, target_state)
print('Minimum elevator rides to target:', dest.dist)
```
|
github_jupyter
|
class State(object):
def __init__(self, elevator, pairs):
self.elevator = elevator
self.unrolled = list(pairs)
self.pairs = list(zip(pairs[::2], pairs[1::2]))
@property
def islegal(self):
"""Return true if the state is not frying any microchips."""
# Check for unprotected Micro chips, and for each
# check if any other generator is on the same floor.
for i, (G, M) in enumerate(self.pairs):
if G != M:
for j, (G2, _) in enumerate(self.pairs):
if G2 == M:
return False
# At this point, return true if all values are legal floors
minimum = min(self.elevator, min(self.unrolled))
maximum = max(self.elevator, max(self.unrolled))
return 1 <= minimum and maximum <= 4
def moves(self):
for shift in -1, 1:
new_floor = self.elevator + shift
if 1 <= new_floor <= 4:
for i, v1 in enumerate(self.unrolled):
if v1 == self.elevator:
for j, v2 in enumerate(self.unrolled[i:], start=i):
if v2 == self.elevator:
new = self.unrolled[:]
new[i] += shift
if i != j:
new[j] += shift
new_state = State(new_floor, new)
if new_state.islegal:
yield new_state
def __eq__(self, other):
return repr(self) == repr(other)
def __lt__(self, other):
return repr(self) < repr(other)
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return repr(self.elevator) + ' ' + ' '.join(''.join(repr(a) for a in pair) for pair in self.pairs)
state = State(1, [2,1,3,1])
print(state.islegal)
print(state)
print(list(state.moves()))
print(state)
import random
def zobrist(n_types=5, n_floors=4):
    # NOTE: this function was left unfinished in the original notebook and is never
    # called below (the BFS hashes states via their repr instead). It is completed
    # here as a sketch, assuming the intent was a Zobrist-style hash that treats
    # generator/microchip pairs as interchangeable (symmetry reduction).
    maxint = 99999999999999999999999999
    floor_hash = {i: random.randint(0, maxint) for i in range(1, n_floors+1)}
    # random.random() takes no arguments; randint was presumably meant here
    # (these per-type hashes end up unused in the symmetry-reduced version)
    ischip_hash, isgen_hash = (random.randint(0, maxint) for _ in range(2))
    chiptype_hash = {i: random.randint(0, maxint) for i in range(n_types)}
    gentype_hash = {i: random.randint(0, maxint) for i in range(n_types)}
    def shash(state):
        code = 0
        gens_at_floor = {i: set() for i in range(1, n_floors+1)}
        chips_at_floor = {i: set() for i in range(1, n_floors+1)}
        for i, f in enumerate(state.unrolled):
            if i % 2 == 0:
                gens_at_floor[f].add(i // 2)   # store the pair index so matching works
            else:
                chips_at_floor[f].add(i // 2)
        matched = {i: gens_at_floor[i].intersection(chips_at_floor[i]) for i in range(1, n_floors+1)}
        gens_at_floor = {i: gens_at_floor[i] - matched[i] for i in range(1, n_floors+1)}
        chips_at_floor = {i: chips_at_floor[i] - matched[i] for i in range(1, n_floors+1)}
        for f in range(1, n_floors+1):
            # hash only per-floor counts, so equivalent (pair-permuted) states collide on purpose
            code ^= floor_hash[f] * (len(matched[f]) * 10007 + len(gens_at_floor[f]) * 101 + len(chips_at_floor[f]))
        code ^= floor_hash[state.elevator]
        return code
    return shash
from collections import deque
def bfs(start_state, target):
visited = set()
start_state.dist = 0
queue = deque([start_state])
curr_depth = 0
while queue:
current = queue.popleft()
visited.add(current)
if current == target:
return current
if current.dist > curr_depth:
curr_depth = current.dist
print('At depth', current.dist)
for neighbor in current.moves():
if neighbor not in visited:
neighbor.dist = current.dist + 1
neighbor.parent = current
queue.append(neighbor)
return start_state
target_state = State(1, [2,1,3,1])
start_state = State(4, [4,4,4,4])
dest = bfs(start_state, target_state)
print('Minimum elevator rides to target:', dest.dist)
target_state = State(1, [1,1,2,3,2,3,2,3,2,3])
start_state = State(4, [4,4,4,4,4,4,4,4,4,4])
dest = bfs(start_state, target_state)
print('Minimum elevator rides to target:', dest.dist)
| 0.764979 | 0.398611 |

[](https://githubtocolab.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/TRANSLATION_PIPELINES_MODELS.ipynb)
# Spark NLP Translation Models & Pipelines
## 0. Colab Setup
```
# Install PySpark and Spark NLP
! pip install -q pyspark==3.1.2 spark-nlp
```
## 1. Start Spark Session
```
import sparknlp
spark = sparknlp.start()
from sparknlp.base import *
from sparknlp.annotator import *
print("Spark NLP version", sparknlp.version())
print("Apache Spark version:", spark.version)
spark
```
## **Pipelines**: Open-Source Marian Translation Pipelines
### Translate English to Hausa Pipeline
```
from sparknlp.pretrained import PretrainedPipeline
pipeline = PretrainedPipeline("translate_en_ha", lang = "xx")
pipeline.annotate("Your sentence to translate!")
```
### Translate English to Azerbaijani Pipeline
```
from sparknlp.pretrained import PretrainedPipeline
pipeline = PretrainedPipeline("translate_en_az", lang = "xx")
pipeline.annotate("Your sentence to translate!")
```
### Translate English to Baltic languages Pipeline
```
from sparknlp.pretrained import PretrainedPipeline
pipeline = PretrainedPipeline("translate_en_bat", lang = "xx")
pipeline.annotate("Your sentence to translate!")
```
### Translate English to Arabic Pipeline
```
from sparknlp.pretrained import PretrainedPipeline
pipeline = PretrainedPipeline("translate_en_ar", lang = "xx")
pipeline.annotate("Your sentence to translate!")
```
## **Models**: Open-Source Marian Fast Neural Machine Translation Models
### Translation Model from Spanish to Waray
```
input_dict = {
1: "Además de ser el rey del norte, John Snow es un médico inglés y líder en el desarrollo de la anestesia y la higiene médica. Se le considera el primero en utilizar datos para curar el brote de cólera en 1854.",
2: "Titanic es una película épica estadounidense de 1997 sobre desastres y romance dirigida, escrita, coproducida y coeditada por James Cameron. Incorpora aspectos históricos y ficticios, y se basa en relatos del hundimiento del RMS Titanic. Está protagonizada por Leonardo DiCaprio y Kate Winslet como miembros de diferentes clases sociales que se enamoran a bordo del barco durante su desafortunado viaje inaugural.",
3: "William Henry Gates III (nacido el 28 de octubre de 1955) es un magnate empresarial, desarrollador de software, inversor y filántropo estadounidense. Es mejor conocido como cofundador de Microsoft Corporation. Durante su carrera en Microsoft, Gates ocupó los puestos de presidente, director ejecutivo (CEO), presidente y arquitecto jefe de software. También fue el mayor accionista individual hasta mayo de 2014. Es uno de los empresarios más reconocidos y pioneros de la revolución de las microcomputadoras de los años setenta y ochenta.",
4: "La Mona Lisa es una obra de Leonardo.. Se celebra en el Louvre en París",
5: "Facebook es un servicio de red social lanzado como TheFacebook el 4 de febrero de 2004. Fue fundado por Mark Zuckerberg con sus compañeros de cuarto y compañeros de la Universidad de Harvard Eduardo Saverin, Andrew McCollum, Dustin Moskovitz y Chris Hughes. La membresía del sitio web estaba inicialmente limitada por los fundadores a los estudiantes de Harvard, pero se expandió a otras universidades en el área de Boston, la Ivy League y gradualmente a la mayoría de las universidades de los Estados Unidos y Canadá.",
6: "Geoffrey Everest Hinton es un psicólogo cognitivo e informático canadiense inglés, más conocido por su trabajo en redes neuronales artificiales. Desde 2013 divide su tiempo trabajando para Google y la Universidad de Toronto. En 2017, fue cofundador y se convirtió en el Asesor Científico Jefe del Vector Institute en Toronto.",
7: "Cuando le dije a John que quería mudarme a Alaska, me advirtió que tendría problemas para encontrar un Starbucks allí."
}
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
sentencerDL = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "xx")\
.setInputCols(["document"])\
.setOutputCol("sentence")
marian = MarianTransformer.pretrained("opus_mt_es_war", "xx")\
.setInputCols(["sentence"])\
.setOutputCol("translation")
marian_pipeline = Pipeline(stages=[documentAssembler, sentencerDL, marian])
light_pipeline = LightPipeline(marian_pipeline.fit(spark.createDataFrame([[""]]).toDF("text")))
for sample in input_dict.values():
result = light_pipeline.fullAnnotate(sample)
print ('Translated:\n')
for sentence in result[0]['translation']:
print (sentence.result)
print("-"*100)
```
### Translation Model from Thai to English
```
data = "ประโยคของคุณที่จะแปล!"
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
sentencerDL = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "xx")\
.setInputCols(["document"])\
.setOutputCol("sentence")
marian = MarianTransformer.pretrained("opus_mt_th_en", "xx")\
.setInputCols(["sentence"])\
.setOutputCol("translation")
marian_pipeline = Pipeline(stages=[documentAssembler, sentencerDL, marian])
light_pipeline = LightPipeline(marian_pipeline.fit(spark.createDataFrame([[""]]).toDF("text")))
result = light_pipeline.fullAnnotate(data)
```
### Translation Model from Turkish to English
```
data = "Çevrilecek cümleniz!"
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
sentencerDL = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "xx")\
.setInputCols(["document"])\
.setOutputCol("sentence")
marian = MarianTransformer.pretrained("opus_mt_tr_en", "xx")\
.setInputCols(["sentence"])\
.setOutputCol("translation")
marian_pipeline = Pipeline(stages=[documentAssembler, sentencerDL, marian])
light_pipeline = LightPipeline(marian_pipeline.fit(spark.createDataFrame([[""]]).toDF("text")))
result = light_pipeline.fullAnnotate(data)
```
### Translation Model from Hungarian to English
```
data = "Fordítandó mondatod!"
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
sentencerDL = SentenceDetectorDLModel.pretrained("sentence_detector_dl", "xx")\
.setInputCols(["document"])\
.setOutputCol("sentence")
marian = MarianTransformer.pretrained("opus_mt_hu_en", "xx")\
.setInputCols(["sentence"])\
.setOutputCol("translation")
marian_pipeline = Pipeline(stages=[documentAssembler, sentencerDL, marian])
light_pipeline = LightPipeline(marian_pipeline.fit(spark.createDataFrame([[""]]).toDF("text")))
result = light_pipeline.fullAnnotate(data)
```
# Ray Tune Tutorial - 03: Search Algorithms and Schedulers - Exercise Solution
© 2019-2020, Anyscale. All Rights Reserved

Unlike the previous tutorials, the Tune tutorial solutions live in separate notebooks, because most of the exercises take a long time to run.
First, we set up everything we need from the lesson.
```
import ray
from ray import tune
ray.init(ignore_reinit_error=True)
```
## Exercise - PopulationBasedTraining
```
from ray.tune.schedulers import PopulationBasedTraining
import sys
sys.path.append("..")
from mnist import ConvNet, TrainMNIST, EPOCH_SIZE, TEST_SIZE, DATA_ROOT
experiment_metrics = dict(metric="mean_accuracy", mode="max")
#search_algorithm = TuneBOHB(config_space, max_concurrent=4, **experiment_metrics)
pbt_scheduler = PopulationBasedTraining(
time_attr='training_iteration',
perturbation_interval=10, # Every N time_attr units, "perturb" the parameters.
hyperparam_mutations={
"lr": [0.001, 0.01, 0.1],
"momentum": [0.001, 0.01, 0.1, 0.9]
},
**experiment_metrics)
# This object is used to bootstrap the process, but these values won't be changed, so when you see them
# listed in the analysis.dataframe(), all values will be the same! Instead, look at the `experiment_tag`.
config = {
"lr": 0.001, # Use the lowest values from the previous cell
"momentum": 0.001
}
```
Now modify the `tune.run()` call we used in the lesson and run it.
> **WARNING:** This will run for a few minutes.
```
analysis = tune.run(TrainMNIST,
scheduler=pbt_scheduler,
config=config,
stop={"mean_accuracy": 0.97, "training_iteration": 600},
num_samples=8,
verbose=1
)
print("Best config: ", analysis.get_best_config(metric="mean_accuracy"))
analysis.dataframe().sort_values('mean_accuracy', ascending=False).head()
```
It's easy to get above `0.97` accuracy; in fact, that threshold is a poor stopping criterion because it cuts exploration short. Instead, let's sort by `training_iteration` to see which combinations converged fastest.
```
analysis.dataframe()[['mean_accuracy', 'experiment_tag', 'training_iteration']].sort_values('training_iteration', ascending=True)
```
As expected, higher values for the learning rate and momentum generally provide quicker convergence. All but one of the trials shown had a learning rate of `0.1`. The momentum value was much less significant.
```
stats = analysis.stats()
secs = stats["timestamp"] - stats["start_time"]
print(f'{secs:7.2f} seconds, {secs/60.0:7.2f} minutes')
```
Try changing the experiment to ensure we explore more combinations.
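For example (a minimal sketch with illustrative values, not a tuned setup), each trial could start from a randomly sampled configuration and run for a fixed iteration budget instead of stopping at an accuracy threshold:
```
config = {
    "lr": tune.choice([0.001, 0.01, 0.1]),
    "momentum": tune.choice([0.001, 0.01, 0.1, 0.9])
}
analysis = tune.run(TrainMNIST,
                    scheduler=pbt_scheduler,
                    config=config,
                    stop={"training_iteration": 100},
                    num_samples=16,
                    verbose=1)
```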
```
ray.shutdown() # "Undo ray.init()".
```
```
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
```
# Fit a mesh via rendering
This tutorial shows how to:
- Load a mesh and textures from an `.obj` file.
- Create a synthetic dataset by rendering a textured mesh from multiple viewpoints
- Fit a mesh to the observed synthetic images using differential silhouette rendering
- Fit a mesh and its textures using differential textured rendering
## 0. Install and Import modules
Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell:
```
import os
import sys
import torch
need_pytorch3d=False
try:
import pytorch3d
except ModuleNotFoundError:
need_pytorch3d=True
if need_pytorch3d:
if torch.__version__.startswith("1.9") and sys.platform.startswith("linux"):
# We try to install PyTorch3D via a released wheel.
version_str="".join([
f"py3{sys.version_info.minor}_cu",
torch.version.cuda.replace(".",""),
f"_pyt{torch.__version__[0:5:2]}"
])
!pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html
else:
# We try to install PyTorch3D from source.
!curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
!tar xzf 1.10.0.tar.gz
os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
!pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'
import os
import torch
import matplotlib.pyplot as plt
from pytorch3d.utils import ico_sphere
import numpy as np
from tqdm.notebook import tqdm
# Util function for loading meshes
from pytorch3d.io import load_objs_as_meshes, save_obj
from pytorch3d.loss import (
chamfer_distance,
mesh_edge_loss,
mesh_laplacian_smoothing,
mesh_normal_consistency,
)
# Data structures and functions for rendering
from pytorch3d.structures import Meshes
from pytorch3d.renderer import (
look_at_view_transform,
OpenGLPerspectiveCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader,
SoftSilhouetteShader,
TexturesVertex
)
# add path for demo utils functions
import sys
import os
sys.path.append(os.path.abspath(''))
```
If using **Google Colab**, fetch the utils file for plotting image grids:
```
!wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/main/docs/tutorials/utils/plot_image_grid.py
from plot_image_grid import image_grid
```
OR if running **locally** uncomment and run the following cell:
```
# from utils.plot_image_grid import image_grid
```
## 1. Load a mesh and texture file
Load an `.obj` file and its associated `.mtl` file and create a **Textures** and **Meshes** object.
**Meshes** is a unique data structure provided in PyTorch3D for working with batches of meshes of different sizes.
**TexturesVertex** is an auxiliary data structure for storing per-vertex RGB texture information about meshes.
**Meshes** has several class methods which are used throughout the rendering pipeline.
If running this notebook using **Google Colab**, run the following cell to fetch the mesh obj and texture files and save it at the path `data/cow_mesh`:
If running locally, the data is already available at the correct path.
```
!mkdir -p data/cow_mesh
!wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.obj
!wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.mtl
!wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow_texture.png
# Setup
if torch.cuda.is_available():
device = torch.device("cuda:0")
torch.cuda.set_device(device)
else:
device = torch.device("cpu")
# Set paths
DATA_DIR = "./data"
obj_filename = os.path.join(DATA_DIR, "cow_mesh/cow.obj")
# Load obj file
mesh = load_objs_as_meshes([obj_filename], device=device)
# We scale normalize and center the target mesh to fit in a sphere of radius 1
# centered at (0,0,0). (scale, center) will be used to bring the predicted mesh
# to its original center and scale. Note that normalizing the target mesh,
# speeds up the optimization but is not necessary!
verts = mesh.verts_packed()
N = verts.shape[0]
center = verts.mean(0)
scale = max((verts - center).abs().max(0)[0])
mesh.offset_verts_(-center)
mesh.scale_verts_((1.0 / float(scale)));
```
## 2. Dataset Creation
We sample different camera positions that encode multiple viewpoints of the cow. We create a renderer with a shader that performs texture map interpolation. We render a synthetic dataset of images of the textured cow mesh from multiple viewpoints.
```
# the number of different viewpoints from which we want to render the mesh.
num_views = 20
# Get a batch of viewing angles.
elev = torch.linspace(0, 360, num_views)
azim = torch.linspace(-180, 180, num_views)
# Place a point light in front of the object. As mentioned above, the front of
# the cow is facing the -z direction.
lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])
# Initialize an OpenGL perspective camera that represents a batch of different
# viewing angles. All the cameras helper methods support mixed type inputs and
# broadcasting. So we can view the camera from a distance of dist=2.7, and
# then specify elevation and azimuth angles for each viewpoint as tensors.
R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)
cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
# We arbitrarily choose one particular view that will be used to visualize
# results
camera = OpenGLPerspectiveCameras(device=device, R=R[None, 1, ...],
T=T[None, 1, ...])
# Define the settings for rasterization and shading. Here we set the output
# image to be of size 128X128. As we are rendering images for visualization
# purposes only we will set faces_per_pixel=1 and blur_radius=0.0. Refer to
# rasterize_meshes.py for explanations of these parameters. We also leave
# bin_size and max_faces_per_bin to their default values of None, which sets
# their values using heuristics and ensures that the faster coarse-to-fine
# rasterization method is used. Refer to docs/notes/renderer.md for an
# explanation of the difference between naive and coarse-to-fine rasterization.
raster_settings = RasterizationSettings(
image_size=128,
blur_radius=0.0,
faces_per_pixel=1,
)
# Create a Phong renderer by composing a rasterizer and a shader. The textured
# Phong shader will interpolate the texture uv coordinates for each vertex,
# sample from a texture image and apply the Phong lighting model
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera,
raster_settings=raster_settings
),
shader=SoftPhongShader(
device=device,
cameras=camera,
lights=lights
)
)
# Create a batch of meshes by repeating the cow mesh and associated textures.
# Meshes has a useful `extend` method which allows us to do this very easily.
# This also extends the textures.
meshes = mesh.extend(num_views)
# Render the cow mesh from each viewing angle
target_images = renderer(meshes, cameras=cameras, lights=lights)
# Our multi-view cow dataset will be represented by these 2 lists of tensors,
# each of length num_views.
target_rgb = [target_images[i, ..., :3] for i in range(num_views)]
target_cameras = [OpenGLPerspectiveCameras(device=device, R=R[None, i, ...],
T=T[None, i, ...]) for i in range(num_views)]
```
Visualize the dataset:
```
# RGB images
image_grid(target_images.cpu().numpy(), rows=4, cols=5, rgb=True)
plt.show()
```
Later in this tutorial, we will fit a mesh to the rendered RGB images, as well as to images of just the cow silhouette. For the latter case, we will render a dataset of silhouette images. Most shaders in PyTorch3D will output an alpha channel along with the RGB image as a 4th channel in an RGBA image. The alpha channel encodes the probability that each pixel belongs to the foreground of the object. We construct a soft silhouette shader to render this alpha channel.
```
# Rasterization settings for silhouette rendering
sigma = 1e-4
raster_settings_silhouette = RasterizationSettings(
image_size=128,
blur_radius=np.log(1. / 1e-4 - 1.)*sigma,
faces_per_pixel=50,
)
# Silhouette renderer
renderer_silhouette = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera,
raster_settings=raster_settings_silhouette
),
shader=SoftSilhouetteShader()
)
# Render silhouette images. The 4th channel (index 3) of the rendering output is
# the alpha/silhouette channel
silhouette_images = renderer_silhouette(meshes, cameras=cameras, lights=lights)
target_silhouette = [silhouette_images[i, ..., 3] for i in range(num_views)]
# Visualize silhouette images
image_grid(silhouette_images.cpu().numpy(), rows=4, cols=5, rgb=False)
plt.show()
```
## 3. Mesh prediction via silhouette rendering
In the previous section, we created a dataset of images of multiple viewpoints of a cow. In this section, we predict a mesh by observing those target images without any knowledge of the ground truth cow mesh. We assume we know the position of the cameras and lighting.
We first define some helper functions to visualize the results of our mesh prediction:
```
# Show a visualization comparing the rendered predicted mesh to the ground truth
# mesh
def visualize_prediction(predicted_mesh, renderer=renderer_silhouette,
target_image=target_rgb[1], title='',
silhouette=False):
inds = 3 if silhouette else range(3)
with torch.no_grad():
predicted_images = renderer(predicted_mesh)
plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.imshow(predicted_images[0, ..., inds].cpu().detach().numpy())
plt.subplot(1, 2, 2)
plt.imshow(target_image.cpu().detach().numpy())
plt.title(title)
plt.axis("off")
# Plot losses as a function of optimization iteration
def plot_losses(losses):
fig = plt.figure(figsize=(13, 5))
ax = fig.gca()
for k, l in losses.items():
ax.plot(l['values'], label=k + " loss")
ax.legend(fontsize="16")
ax.set_xlabel("Iteration", fontsize="16")
ax.set_ylabel("Loss", fontsize="16")
ax.set_title("Loss vs iterations", fontsize="16")
```
Starting from a sphere mesh, we will learn offsets of each vertex such that the predicted mesh silhouette is more similar to the target silhouette image at each optimization step. We begin by loading our initial sphere mesh:
```
# We initialize the source shape to be a sphere of radius 1.
src_mesh = ico_sphere(4, device)
```
We create a new differentiable renderer for rendering the silhouette of our predicted mesh:
```
# Rasterization settings for differentiable rendering, where the blur_radius
# initialization is based on Liu et al, 'Soft Rasterizer: A Differentiable
# Renderer for Image-based 3D Reasoning', ICCV 2019
sigma = 1e-4
raster_settings_soft = RasterizationSettings(
image_size=128,
blur_radius=np.log(1. / 1e-4 - 1.)*sigma,
faces_per_pixel=50,
)
# Silhouette renderer
renderer_silhouette = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera,
raster_settings=raster_settings_soft
),
shader=SoftSilhouetteShader()
)
```
We initialize settings, losses, and the optimizer that will be used to iteratively fit our mesh to the target silhouettes:
```
# Number of views to optimize over in each SGD iteration
num_views_per_iteration = 2
# Number of optimization steps
Niter = 2000
# Plot period for the losses
plot_period = 250
%matplotlib inline
# Optimize using rendered silhouette image loss, mesh edge loss, mesh normal
# consistency, and mesh laplacian smoothing
losses = {"silhouette": {"weight": 1.0, "values": []},
"edge": {"weight": 1.0, "values": []},
"normal": {"weight": 0.01, "values": []},
"laplacian": {"weight": 1.0, "values": []},
}
# Losses to smooth / regularize the mesh shape
def update_mesh_shape_prior_losses(mesh, loss):
# edge length regularization of the predicted mesh
loss["edge"] = mesh_edge_loss(mesh)
# mesh normal consistency
loss["normal"] = mesh_normal_consistency(mesh)
# mesh laplacian smoothing
loss["laplacian"] = mesh_laplacian_smoothing(mesh, method="uniform")
# We will learn to deform the source mesh by offsetting its vertices
# The shape of the deform parameters is equal to the total number of vertices in
# src_mesh
verts_shape = src_mesh.verts_packed().shape
deform_verts = torch.full(verts_shape, 0.0, device=device, requires_grad=True)
# The optimizer
optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9)
```
We write an optimization loop to iteratively refine our predicted mesh from the sphere mesh into a mesh that matches the silhouettes of the target images:
```
loop = tqdm(range(Niter))
for i in loop:
# Initialize optimizer
optimizer.zero_grad()
# Deform the mesh
new_src_mesh = src_mesh.offset_verts(deform_verts)
# Losses to smooth /regularize the mesh shape
loss = {k: torch.tensor(0.0, device=device) for k in losses}
update_mesh_shape_prior_losses(new_src_mesh, loss)
# Compute the average silhouette loss over two random views, as the average
# squared L2 distance between the predicted silhouette and the target
# silhouette from our dataset
for j in np.random.permutation(num_views).tolist()[:num_views_per_iteration]:
images_predicted = renderer_silhouette(new_src_mesh, cameras=target_cameras[j], lights=lights)
predicted_silhouette = images_predicted[..., 3]
loss_silhouette = ((predicted_silhouette - target_silhouette[j]) ** 2).mean()
loss["silhouette"] += loss_silhouette / num_views_per_iteration
# Weighted sum of the losses
sum_loss = torch.tensor(0.0, device=device)
for k, l in loss.items():
sum_loss += l * losses[k]["weight"]
losses[k]["values"].append(float(l.detach().cpu()))
# Print the losses
loop.set_description("total_loss = %.6f" % sum_loss)
# Plot mesh
if i % plot_period == 0:
visualize_prediction(new_src_mesh, title="iter: %d" % i, silhouette=True,
target_image=target_silhouette[1])
# Optimization step
sum_loss.backward()
optimizer.step()
visualize_prediction(new_src_mesh, silhouette=True,
target_image=target_silhouette[1])
plot_losses(losses)
```
## 4. Mesh and texture prediction via textured rendering
We can predict both the mesh and its texture if we add an additional loss based on comparing a predicted rendered RGB image to the target image. As before, we start with a sphere mesh. We learn both translational offsets and RGB texture colors for each vertex in the sphere mesh. Since our loss is based on rendered RGB pixel values instead of just the silhouette, we use a **SoftPhongShader** instead of a **SoftSilhouetteShader**.
```
# Rasterization settings for differentiable rendering, where the blur_radius
# initialization is based on Liu et al, 'Soft Rasterizer: A Differentiable
# Renderer for Image-based 3D Reasoning', ICCV 2019
sigma = 1e-4
raster_settings_soft = RasterizationSettings(
image_size=128,
blur_radius=np.log(1. / 1e-4 - 1.)*sigma,
faces_per_pixel=50,
)
# Differentiable soft renderer using per vertex RGB colors for texture
renderer_textured = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera,
raster_settings=raster_settings_soft
),
shader=SoftPhongShader(device=device,
cameras=camera,
lights=lights)
)
```
We initialize settings, losses, and the optimizer that will be used to iteratively fit our mesh to the target RGB images:
```
# Number of views to optimize over in each SGD iteration
num_views_per_iteration = 2
# Number of optimization steps
Niter = 2000
# Plot period for the losses
plot_period = 250
%matplotlib inline
# Optimize using rendered RGB image loss, rendered silhouette image loss, mesh
# edge loss, mesh normal consistency, and mesh laplacian smoothing
losses = {"rgb": {"weight": 1.0, "values": []},
"silhouette": {"weight": 1.0, "values": []},
"edge": {"weight": 1.0, "values": []},
"normal": {"weight": 0.01, "values": []},
"laplacian": {"weight": 1.0, "values": []},
}
# We will learn to deform the source mesh by offsetting its vertices
# The shape of the deform parameters is equal to the total number of vertices in
# src_mesh
verts_shape = src_mesh.verts_packed().shape
deform_verts = torch.full(verts_shape, 0.0, device=device, requires_grad=True)
# We will also learn per vertex colors for our sphere mesh that define texture
# of the mesh
sphere_verts_rgb = torch.full([1, verts_shape[0], 3], 0.5, device=device, requires_grad=True)
# The optimizer
optimizer = torch.optim.SGD([deform_verts, sphere_verts_rgb], lr=1.0, momentum=0.9)
```
We write an optimization loop to iteratively refine our predicted mesh and its vertex colors from the sphere mesh into a mesh that matches the target images:
```
loop = tqdm(range(Niter))
for i in loop:
# Initialize optimizer
optimizer.zero_grad()
# Deform the mesh
new_src_mesh = src_mesh.offset_verts(deform_verts)
# Add per vertex colors to texture the mesh
new_src_mesh.textures = TexturesVertex(verts_features=sphere_verts_rgb)
# Losses to smooth /regularize the mesh shape
loss = {k: torch.tensor(0.0, device=device) for k in losses}
update_mesh_shape_prior_losses(new_src_mesh, loss)
# Randomly select two views to optimize over in this iteration. Compared
# to using just one view, this helps resolve ambiguities between updating
# mesh shape vs. updating mesh texture
for j in np.random.permutation(num_views).tolist()[:num_views_per_iteration]:
images_predicted = renderer_textured(new_src_mesh, cameras=target_cameras[j], lights=lights)
# Squared L2 distance between the predicted silhouette and the target
# silhouette from our dataset
predicted_silhouette = images_predicted[..., 3]
loss_silhouette = ((predicted_silhouette - target_silhouette[j]) ** 2).mean()
loss["silhouette"] += loss_silhouette / num_views_per_iteration
# Squared L2 distance between the predicted RGB image and the target
# image from our dataset
predicted_rgb = images_predicted[..., :3]
loss_rgb = ((predicted_rgb - target_rgb[j]) ** 2).mean()
loss["rgb"] += loss_rgb / num_views_per_iteration
# Weighted sum of the losses
sum_loss = torch.tensor(0.0, device=device)
for k, l in loss.items():
sum_loss += l * losses[k]["weight"]
losses[k]["values"].append(float(l.detach().cpu()))
# Print the losses
loop.set_description("total_loss = %.6f" % sum_loss)
# Plot mesh
if i % plot_period == 0:
visualize_prediction(new_src_mesh, renderer=renderer_textured, title="iter: %d" % i, silhouette=False)
# Optimization step
sum_loss.backward()
optimizer.step()
visualize_prediction(new_src_mesh, renderer=renderer_textured, silhouette=False)
plot_losses(losses)
```
## 5. Save the final predicted mesh
```
# Fetch the verts and faces of the final predicted mesh
final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0)
# Scale normalize back to the original target size
final_verts = final_verts * scale + center
# Store the predicted mesh using save_obj
final_obj = os.path.join('./', 'final_model.obj')
save_obj(final_obj, final_verts, final_faces)
```
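As an optional sanity check, the saved file can be reloaded with the same loader used at the start of the tutorial:
```
# Reload the saved mesh and print its size (optional check)
reloaded_mesh = load_objs_as_meshes([final_obj], device=device)
print(reloaded_mesh.verts_packed().shape, reloaded_mesh.faces_packed().shape)
```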
## 6. Conclusion
In this tutorial, we learned how to load a textured mesh from an obj file and how to create a synthetic dataset by rendering it from multiple viewpoints. We showed how to set up an optimization loop to fit a mesh to the observed dataset images based on a rendered silhouette loss. We then augmented this optimization loop with an additional loss based on rendered RGB images, which allowed us to predict both a mesh and its texture.
# Lecture 15: Matrix functions and matrix equations
## Previous part
- Toeplitz matrices, circulant matrices, FFT
## Today's lecture
- Matrix functions and matrix equations
## Outline of this part
- What is a matrix function
- Matrix exponential
- (Some) applications
Book to read: [Functions of matrices by Nick Higham](http://www.google.ru/books?hl=ru&lr=&id=2Wz_zVUEwPkC&oi=fnd&pg=PR3&dq=Higham+matrix+function&ots=pTt6fpLGRX&sig=DgUuX-SpBZGin8CFUo-4MYnOcHE&redir_esc=y#v=onepage&q=Higham%20matrix%20function&f=false)
## The simplest matrix function: matrix polynomial
It is very easy to define a matrix polynomial as
$$ P(A) = \sum_{k=0}^n c_k A^k. $$
**Side-note:** [Hamilton-Cayley theorem](https://en.wikipedia.org/wiki/Cayley%E2%80%93Hamilton_theorem) states that $F(A) = 0$ where $F(\lambda) = \det(A - \lambda I)$, thus all matrix polynomials have degree $\leq n-1$.
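A matrix polynomial can be evaluated with a matrix version of Horner's scheme; a minimal NumPy sketch (with made-up coefficients) is:
```
import numpy as np

def matrix_polyval(coeffs, A):
    # Evaluate P(A) = c_0 I + c_1 A + ... + c_n A^n by Horner's scheme;
    # coeffs = [c_0, c_1, ..., c_n].
    I = np.eye(A.shape[0])
    P = coeffs[-1] * I
    for c in reversed(coeffs[:-1]):
        P = P @ A + c * I
    return P

A = np.random.randn(4, 4)
print(np.linalg.norm(matrix_polyval([1.0, 2.0, 3.0], A) - (np.eye(4) + 2*A + 3*A @ A)))
```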
## Matrix polynomials as building blocks
We can define a function of the matrix by **Taylor series**:
$$ f(A) = \sum_{k=0}^{\infty} c_k A^k. $$
The convergence is understood as the convergence in some **matrix norm**.
An example of such a series is the **Neumann series**
$$ (I - F)^{-1} = \sum_{k=0}^{\infty} F^k, $$
which is well defined for $\rho(F) < 1$.
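A quick numerical check of the Neumann series (with an arbitrary random $F$ rescaled so that $\rho(F) < 1$):
```
import numpy as np

np.random.seed(0)
F = np.random.randn(5, 5)
F *= 0.5 / max(abs(np.linalg.eigvals(F)))    # rescale so that rho(F) = 0.5
S, term = np.zeros((5, 5)), np.eye(5)
for _ in range(60):                          # partial sum I + F + F^2 + ...
    S += term
    term = term @ F
print(np.linalg.norm(S - np.linalg.inv(np.eye(5) - F)))
```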
## Matrix exponential series
The most well-known matrix function is **matrix exponential**. In the scalar case,
$$ e^x = 1 + x + \frac{x^2}{2} + \frac{x^3}{6} + \ldots = \sum_{k=0}^{\infty} \frac{x^k}{k!}, $$
and it directly translates to the matrix case:
$$ e^A = \sum_{k=0}^{\infty} \frac{A^k}{k!}, $$
and this series always converges, because it is dominated termwise by the scalar series
$$\sum_{k=0}^{\infty} \frac{\Vert A \Vert^k}{k!} = e^{\Vert A \Vert}.$$
## Why matrix exponential is important
A **lot of** practical problems are reduced to a system of linear ODEs of the form
$$ \frac{dy}{dt} = Ay, \quad y(0) = y_0. $$
## ODE and matrix exponentials
- Given the equation
$$\frac{dy}{dt} = Ay, \quad y(0) = y_0$$
- The formal solution is given by $y(t) = e^{At} y_0$, so if we know $e^{At}$ (or can compute matrix-by-vector product fast) there is a big gain over the time-stepping schemes.
- Indeed,
$$\frac{d}{dt} e^{At} = \frac{d}{dt} \sum_{k=0}^{\infty} \frac{t^k A^k}{k!} = \sum_{k=1}^{\infty} \frac{t^{k-1} A^{k}}{(k-1)!} = A e^{At}.$$
## Sidenote: matrix exponential and time stepping
Using the matrix exponential can be much better than time stepping with, say, the explicit Euler scheme:
$$\frac{dy}{dt} \approx \frac{y_{k+1} - y_k}{\tau} = A y_k, \quad y_{k+1} = y_k + \tau A y_k,$$
provided we know how to compute the product of the matrix exponential with a vector using only matrix-by-vector products.
For dense matrices, the matrix exponential also gives the **exact** answer to the ODE for any $t$, in contrast to the **approximation** produced by time-stepping schemes.
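A small illustration (with a made-up $2\times 2$ matrix): the exact propagator $e^{At}y_0$ versus explicit Euler time stepping:
```
import numpy as np
import scipy.linalg

A = np.array([[0.0, 1.0], [-100.0, -1.0]])   # a lightly damped oscillator
y0 = np.array([1.0, 0.0])
t, n_steps = 1.0, 1000
y_exact = scipy.linalg.expm(A * t) @ y0      # answer via the matrix exponential
y, tau = y0.copy(), t / n_steps
for _ in range(n_steps):
    y = y + tau * (A @ y)                    # explicit Euler step
print(np.linalg.norm(y - y_exact))
```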
## How to compute matrix functions, including exponential?
- There are many ways, even for the matrix exponential!
- See [C. Van Loan, C. Moler, Nineteen Dubious Ways to Compute the Exponential of a Matrix, Twenty-Five Years Later](http://www.cs.cornell.edu/cv/researchpdf/19ways+.pdf)
- The simplest way is to diagonalize the matrix:
$$ A = S \Lambda S^{-1}, $$
where the columns of $S$ are **eigenvectors** of the matrix $A$, then
$$ F(A) = S F(\Lambda) S^{-1}. $$
**Problem: diagonalization can be unstable!** (and not every matrix is diagonalizable)
Let us look at how well diagonalization works on a (perturbed) Jordan block:
```
import numpy as np
eps = 1e-4
p = 4
a = np.eye(p)
for i in range(p-1):
a[i, i+1] = 1
a[p-1, 2] = eps
val, vec = np.linalg.eig(a)
#print a
print(np.linalg.norm(a - vec.dot(val[:, np.newaxis] * np.linalg.inv(vec))))
#print 'S * D * S^{-1}:'
print(vec.dot(val[:, np.newaxis] * np.linalg.inv(vec)))
print(a)
```
Now we can compute a function of a **perturbed Jordan block** and compare with the Schur-based `expm`.
```
import numpy as np
eps = 1e-16
p = 5
a = np.eye(p)
for i in range(p-1):
a[i, i+1] = 1
a[p-1, 0] = eps
a = np.array(a)
val, vec = np.linalg.eig(a)
print(np.linalg.norm(a - vec.dot(np.diag(val)).dot(np.linalg.inv(vec))))
fun = lambda x: np.exp(x)
#Using diagonalization
fun_diag = vec.dot(np.diag(fun(val))).dot(np.linalg.inv(vec))
#Using Schur
import scipy.linalg
fun_m = scipy.linalg.expm(a)
print('Difference = {}'.format(np.linalg.norm(fun_m - fun_diag)))
```
## How the ```funm``` function works
- The exponential of a matrix is a special function, so there are special methods for its computation.
- For a general function $F$, there is a beautiful **Schur-Parlett algorithm**, which is based on the **Schur theorem**.
## Schur-Parlett algorithm
- Given a matrix $A$ we want to compute $F(A)$, and we only can evaluate $F$ at **scalar points**.
- First, we reduce $A$ to the **triangular form** as
$$ A = U T U^*. $$
- Therefore, $F(A)=U F(T) U^*$
- We only need to compute the function of triangular matrices.
## Computing functions of triangular matrices
We know the values on the diagonal,
$$ F_{ii} = F(T_{ii}), $$
and we also know that
$$ F T = T F, $$
i.e. the matrix function commutes with the matrix itself. Moreover, the function of a triangular matrix is a triangular matrix as well.
Using the known values on the diagonal and the commutativity property, we get the diagonals of the matrix one-by-one:
$$f_{ij} = t_{ij} \frac{f_{ii} - f_{jj}}{t_{ii} - t_{jj}} + \sum_{k=i+1}^{j-1} \frac{f_{ik} t_{kj} - t_{ik}f_{kj}}{t_{ii} - t_{jj}}.$$
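A minimal sketch of this recurrence (no blocking, so it assumes the diagonal entries of $T$ are well separated):
```
import numpy as np
import scipy.linalg

def funm_parlett(A, f):
    # Schur-Parlett without blocking: works only if the diagonal of T
    # has well-separated entries (otherwise the divisions below blow up).
    T, U = scipy.linalg.schur(A, output='complex')
    n = T.shape[0]
    F = np.diag(f(np.diag(T)))                  # F_ii = f(T_ii)
    for d in range(1, n):                       # fill superdiagonals one by one
        for i in range(n - d):
            j = i + d
            s = T[i, j] * (F[i, i] - F[j, j])
            s += F[i, i+1:j] @ T[i+1:j, j] - T[i, i+1:j] @ F[i+1:j, j]
            F[i, j] = s / (T[i, i] - T[j, j])
    return U @ F @ U.conj().T

A = np.random.randn(5, 5)
print(np.linalg.norm(funm_parlett(A, np.exp) - scipy.linalg.expm(A)))
```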
## Matrix functions: definition
- One way to define a matrix function $f(A)$ is to use **Jordan canonical form**.
- A much more elegant way is to use **Cauchy integral representation:**
$$
f(A) = \frac{1}{2\pi i}\int_{\Gamma} f(z) (zI - A)^{-1}\, dz,
$$
where $f(z)$ is analytic on and inside a closed contour $\Gamma$ that encloses the spectrum of $A$.
- This definition can be generalized to the **operator case.**
## Important matrix functions
- Matrix exponential, used to solve $\frac{dy}{dt} = Ay$ in the "explicit" way, $y = y_0 e^{At}.$
- $\cos(A), \sin(A)$ used to solve wave equation $\frac{d^2 y}{dt^2} + Ay = 0.$
- Sign function, $\mathrm{sign}(A)$, used to compute **spectral projections.**
- Inverse square root $A^{-1/2}$, used in many places, for example to generate samples from Gaussian distributions
## Matrix exponential
- The matrix exponential is given by the following series:
$$e^A = I + A + \frac{1}{2} A^2 + \frac{1}{3!} A^3 + \ldots$$
- This series is a bad idea (even for a scalar case, can you guess why?)
- This form for $e^A$ almost assumes a **Krylov method** for the evaluation of $e^{At} y_0,$ by the way.
```
import numpy as np
x = -30.0 #Point
k = 1000000 #Number of terms
b = 1.0
x0 = x
for i in range(1, k):
b += x0
x0 *= x/(i+1)
print('Error in the exponent: {}'.format((b - np.exp(x))/np.exp(x)))
```
## Series convergence
- The series for the exponential converges slowly for large $|x|$ (for matrices, for large norm) and suffers from catastrophic cancellation for negative $x$, as the experiment above shows.
- What can we do?
## Method 1: Krylov method
- We can use the idea of Krylov methods: generate an orthogonal basis of the Krylov subspace with the Arnoldi method and compute (this works for a general function)
$$ f(A)v \approx f(Q H Q^*)v = Q f(H) Q^*v,$$
where $H$ is a small upper Hessenberg matrix, to which we can apply, for example, the **Schur-Parlett algorithm** (a minimal sketch of this Arnoldi-based approximation is given after this list).
- The convergence of the Krylov method can be quite slow: it is actually a **polynomial approximation** to a function.
- And convergence of polynomial approximation to the matrix function **can be slow.**
- **Idea:** Replace by rational approximation!
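Before moving on to rational approximations, here is the promised minimal sketch of the polynomial Arnoldi approximation $f(A)v \approx \Vert v\Vert\, Q f(H) e_1$, illustrated with $f = \exp$ and a made-up diagonal test matrix:
```
import numpy as np
import scipy.linalg

def expm_krylov(A, v, m=30):
    # Plain Arnoldi sketch: exp(A) v ~= ||v|| Q exp(H) e_1.
    n = v.shape[0]
    Q = np.zeros((n, m + 1))
    H = np.zeros((m + 1, m))
    beta = np.linalg.norm(v)
    Q[:, 0] = v / beta
    for j in range(m):
        w = A @ Q[:, j]
        for i in range(j + 1):                # modified Gram-Schmidt
            H[i, j] = Q[:, i] @ w
            w -= H[i, j] * Q[:, i]
        H[j + 1, j] = np.linalg.norm(w)
        if H[j + 1, j] < 1e-12:               # happy breakdown
            m = j + 1
            break
        Q[:, j + 1] = w / H[j + 1, j]
    e1 = np.zeros(m); e1[0] = 1.0
    return beta * Q[:, :m] @ (scipy.linalg.expm(H[:m, :m]) @ e1)

A = -np.diag(np.linspace(0.1, 10.0, 100))     # made-up diagonal test matrix
v = np.ones(100)
print(np.linalg.norm(expm_krylov(A, v) - scipy.linalg.expm(A) @ v))
```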
## Pade approximations
- The matrix exponential is well approximated by a **rational function**:
$$
\exp(x) \approx \frac{p(x)}{q(x)},
$$
where $p(x)$ and $q(x)$ are polynomials, and the computation of a rational function of a matrix reduces to **matrix-matrix products** and **matrix inversions**.
- The rational form is also very useful when only the product of a matrix exponential with a vector is needed, since the evaluation reduces to **matrix-by-vector products** and **linear system solves**.
```
#Computing Pade approximant
import numpy as np
import mpmath
%matplotlib inline
from mpmath import pade, taylor, polyval
import matplotlib.pyplot as plt
x = np.linspace(-5, -1, 128)
a = taylor(mpmath.exp, 0, 20) #Taylor series
k1 = 10
k2 = 10
p, q = pade(a, k1, k2) #Pade approximant
plt.semilogy(x, np.abs(polyval(p[::-1], x)/polyval(q[::-1], x) - np.exp(x)), label='Pade {0:d}/{1:d}'.format(k1, k2))
plt.semilogy(x, np.abs(polyval(a[::-1], x) - np.exp(x)), label='Taylor, 20 terms')
plt.legend()
_ = plt.title('Approximation error for exp(x)')
```
## Scaling & squaring algorithm
The "canonical algorithm" for the computation of the matrix exponential also relies on **scaling** of the matrix $A:$
$$\exp(A) = \exp(A/2^k)^{(2^k)}.$$
The scaled matrix then has a small norm, and the algorithm proceeds as follows (a minimal sketch follows the list):
- Scale the matrix as $B := A/2^k$ to make it norm less than $1$.
- Compute exponent of $C = e^B$ by a **Pade approximant**
- Square $e^A \approx C^{(2^k)}$ in $k$ matrix-by-matrix products.
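A minimal sketch of these three steps, with a fixed Pade order and a crude scaling rule (unlike the carefully tuned version inside `scipy.linalg.expm`):
```
import numpy as np
from math import factorial
import scipy.linalg

def expm_scaling_squaring(A, m=6):
    # Crude scaling rule: make ||A / 2^s|| small.
    s = max(0, int(np.ceil(np.log2(np.linalg.norm(A, 1)))) + 1)
    B = A / 2**s
    # Coefficients of the diagonal [m/m] Pade approximant of exp: N(x) / N(-x).
    c = [factorial(2*m - j) * factorial(m) /
         (factorial(2*m) * factorial(j) * factorial(m - j)) for j in range(m + 1)]
    I = np.eye(A.shape[0])
    X, N, D = I.copy(), c[0] * I, c[0] * I
    for j in range(1, m + 1):
        X = X @ B
        N = N + c[j] * X
        D = D + ((-1)**j) * c[j] * X
    E = np.linalg.solve(D, N)                 # Pade approximant of exp(B)
    for _ in range(s):                        # undo the scaling by repeated squaring
        E = E @ E
    return E

A = np.random.randn(6, 6)
print(np.linalg.norm(expm_scaling_squaring(A) - scipy.linalg.expm(A)))
```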
## Large-scale matrix exponentials
- Large-scale matrices obviously do not allow for efficient scaling-and-squaring (need to work with dense matrices), thus we can use **Krylov methods** or (better) Rational Krylov methods.
- The idea of a rational Krylov subspace is motivated by the idea of rational approximation instead of polynomial approximation.
- Krylov methods rely on polynomial approximations
## Rational Krylov subspaces
The simplest (yet efficient) approach is based on the so-called **extended Krylov subspaces:**
$$KE(A, b) = \mathrm{Span}(\ldots, A^{-2} b, A^{-1} b, b, A b, A^2 b, \ldots)$$
At each step you add a vector of the form $A w$ and $A^{-1} w$ to the subspace, and orthogonalize the result (**rational Arnoldi method**).
I.e., each step requires only one linear system solve with $A$, and since the matrix $A$ is fixed, we can **factorize it** once
## Rational Krylov methods
Rational Krylov methods are the most efficient for the computation of matrix functions:
- we construct an orthogonal basis in the span,
$$KE(A, b) = \mathrm{Span}(\ldots, A^{-2} b, A^{-1} b, b, A b, A^2 b, \ldots)$$
- compute
$$f(A)b \approx Q f(H) Q^*b,$$
where $H = Q^* A Q.$
It requires one solver and matrix-by-vector product at each step.
## Application to compute distance between manifolds
- Represent two manifolds $\mathcal{M}$ and $\mathcal{N}$ with point clouds
- Construct two graphs from these point clouds
- Every graph has its own graph laplacian ($L_{\mathcal{M}}$ and $L_{\mathcal{N}}$) (check the lecture about Fiedler vector!)
- Heat kernel trace
$$\mathrm{hkt}_{\mathcal{M}}(t) = \mathrm{trace}(\exp(-t L_{\mathcal{M}}))$$
contains all information about graph's spectrum
- Gromov-Wasserstein distance between manifolds $\mathcal{M}$ and $\mathcal{N}$:
$$d_{GW}(\mathcal{M}, \mathcal{N}) \geq \sup_{t > 0} \exp(-2(t + t^{-1}))|\mathrm{hkt}_{\mathcal{M}}(t) - \mathrm{hkt}_{\mathcal{N}}(t)|$$
### Stochastic trace estimator
- Hutchinson [proposes](https://www.tandfonline.com/doi/abs/10.1080/03610919008812866) the following method
$$ \mathrm{trace}(A) = \mathbb{E}_{p(x)}(x^{\top}Ax), $$
where $p(x)$ is a distribution with i.i.d. zero-mean, unit-variance entries, e.g. Rademacher or standard normal
- To estimate the trace we only need fast matrix-by-vector products!
- And here the rational Krylov subspace helps a lot since $\mathrm{hkt}$ requires trace of matrix exponential
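A minimal sketch of the Hutchinson estimator for $\mathrm{hkt}(t) = \mathrm{trace}(e^{-tL})$ on a small path-graph Laplacian; the dense `expm` here is just a stand-in for a fast matrix-by-vector product with $e^{-tL}$ (e.g. via a rational Krylov method):
```
import numpy as np
import scipy.linalg

np.random.seed(0)
n, t, n_probes = 200, 1.0, 200
main = 2.0 * np.ones(n); main[0] = main[-1] = 1.0
L = np.diag(main) - np.diag(np.ones(n - 1), 1) - np.diag(np.ones(n - 1), -1)  # path-graph Laplacian
E = scipy.linalg.expm(-t * L)
probes = np.random.choice([-1.0, 1.0], size=(n, n_probes))     # Rademacher probes
estimate = np.mean(np.einsum('ij,ij->j', probes, E @ probes))  # mean of x^T exp(-tL) x
print(estimate, np.trace(E))
```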
### Distances between languages ([original paper](https://openreview.net/pdf?id=HyebplHYwB))
<img src="./gw_matexp.png">
## Other matrix functions
Now, let us briefly talk about **other** matrix functions:
- sign
- inverse square root
## Sign function
- Sign function is defined as
$$\mathrm{sign}(x) = \begin{cases} 1, \quad x > 0, \\ -1, \quad x < 0. \end{cases}$$
- Given a matrix $A = U \Lambda U^*$, it effectively puts all the eigenvalues larger than $0$ to $1$, and all eigenvalues smaller than $0$ to $-1$, thus
$$P = \frac{(I + \mathrm{sign}(A))}{2}$$
is a **projector** onto the subspace spanned by all positive eigenvalues.
- Such projectors can be very useful in **large-scale** eigenvalue computations, when you only need to find a subspace.
## How to compute sign function?
- There is a very simple iteration to compute the sign function, namely
$$X_{k+1} = \frac{1}{2} (X_k + X^{-1}_k), X_0 = \alpha A.$$
- This iteration converges **quadratically** to the sign function (a minimal sketch is given after this list).
- You can also get a polynomial iteration, [proposed by R. Byers](http://www.sciencedirect.com/science/article/pii/0024379587902229)
$$X_{k+1} = \frac{1}{2} X_k (3 I - X_k), \quad X_0 = \alpha A.$$
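A minimal NumPy sketch of the Newton iteration; it assumes $A$ has no eigenvalues on the imaginary axis, and the scaling $X_0 = \alpha A$ only affects the speed, so $\alpha = 1$ is used here:
```
import numpy as np

def matrix_sign(A, tol=1e-12, maxiter=100):
    X = A.copy()
    for _ in range(maxiter):
        X_new = 0.5 * (X + np.linalg.inv(X))    # Newton step
        if np.linalg.norm(X_new - X, 1) <= tol * np.linalg.norm(X_new, 1):
            return X_new
        X = X_new
    return X

A = np.diag([3.0, 1.0, -2.0, -5.0])
print(np.diag(matrix_sign(A)))                   # expect [ 1.  1. -1. -1.]
```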
## Matrix sign function: applications
- One of the important applications of the matrix sign function is the solution of the **Algebraic Riccati equation**
$$A^* X + X A - X R X + G = 0,$$
which arises in optimal control and stochastic control.
- Solving **ARE** is equivalent to finding a **stable** invariant subspace (i.e., corresponding to the negative eigenvalues) of the matrix
$$
C = \begin{bmatrix} A^* & G \\ R & -A \end{bmatrix}.
$$
## Inverse square root of the matrix
- The inverse square root of the matrix, $A^{-1/2}$, is also often important.
- For example, consider sampling from a multivariate Gaussian whose **precision matrix** is a structured matrix $A = A^* > 0$, i.e. with density proportional to
$$e^{-\frac{1}{2}(Ax, x)}$$
(so the covariance is $A^{-1}$).
- If $x$ is really huge (millions of components), how do we **generate samples**?
- The simplest algorithm is to generate a vector $y$ with i.i.d. entries $y_i \sim N(0, 1)$ and then compute (see the sketch after this list)
$$x = A^{-\frac{1}{2}} y.$$
- The vector $x$ then has the desired distribution.
- To compute the action of the (inverse) matrix square root it is very efficient to use **rational Krylov subspaces.**
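A small dense sketch of this sampling recipe; for a huge structured $A$ one would apply $A^{-1/2}$ to $y$ with a rational Krylov method instead of forming it:
```
import numpy as np

np.random.seed(0)
n, n_samples = 300, 20000
B = np.random.randn(n, n)
A = B @ B.T + n * np.eye(n)                      # a random SPD "precision" matrix
w, V = np.linalg.eigh(A)
A_inv_sqrt = (V / np.sqrt(w)) @ V.T              # dense A^{-1/2} via the eigendecomposition
x = A_inv_sqrt @ np.random.randn(n, n_samples)   # x = A^{-1/2} y
emp_cov = x @ x.T / n_samples                    # empirical covariance, should approach A^{-1}
print(np.linalg.norm(emp_cov - np.linalg.inv(A)) / np.linalg.norm(np.linalg.inv(A)))
```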
## Matrix equations
- An equation of the form
$$F(X) = G, \quad X \in \mathbb{R}^{n \times m}$$
is called a **matrix equation**.
- The equation is a **linear matrix equation** when $F$ is a linear operator acting on matrices.
## Two important matrix equations
We will discuss two matrix equations:
- **Sylvester equation** of the form
$$ A X + X B = C,$$
where $A$ and $B$ are given, and its special case, the **continuous Lyapunov equation**,
$$ A X + XA^{\top} = C,$$
and
- **discrete Lyapunov equation**
$$A X A^* - X = C. $$
## Application of the Lyapunov equation
- The Lyapunov equation is very important for the stability analysis of dynamical systems and also for model order reduction.
- The system
$$\frac{dy}{dt} = Ay, \quad y(0) = y_0,$$
is called (asymptotically) stable if $y(t) \rightarrow 0$ as $t \rightarrow \infty$ for every $y_0$.
- The system is stable iff for any $Q = Q^* > 0$ there exists a unique positive definite solution $P$ of the Lyapunov equation
$$A P + P A^* = -Q.$$
- The stability then can be checked without finding eigenvalues.
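A small check with SciPy's dense Lyapunov solver: for a stable $A$ the solution of $AP + PA^* = -I$ is positive definite.
```
import numpy as np
import scipy.linalg

np.random.seed(0)
A = np.random.randn(5, 5) - 5.0 * np.eye(5)                  # shifted to make A stable
P = scipy.linalg.solve_continuous_lyapunov(A, -np.eye(5))    # solves A P + P A^H = -I
print(np.all(np.linalg.eigvalsh((P + P.T) / 2) > 0))         # P > 0 for a stable A
print(np.max(np.linalg.eigvals(A).real) < 0)                 # direct eigenvalue check
```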
## Application to model order reduction
Model order reduction of linear time-invariant systems:
$$\frac{dx}{dt} = Ax + Bu, \quad y = C x,$$
where $x$ is the **state**, $u$ is the control, and $y$ is the observed output. We want to approximate it by a smaller-dimensional linear system
$$
\frac{d\widehat{x}}{dt} = \widehat{A} \widehat{x} + \widehat{B} u, \quad y = \widehat{C} \widehat{x},
$$
in such a way that the output of the reduced system is close to the output of the original (big one).
The optimal $\widehat{A}, \widehat{B}, \widehat{C}$ can be recovered from the solutions of auxiliary Lyapunov equations.
## Solution of the Sylvester equation
$$ A X + X B = C,$$
- This is a system of linear equations for $X$.
- It can be rewritten as a linear system using the **vec** and **Kronecker product** operations.
- First, we introduce the $\mathrm{vec}$ operation, which stacks the columns of a matrix into one long vector.
## Kronecker product
The Kronecker product of two matrices $A \in \mathbb{R}^{n_1 \times m_1}$ and $B \in \mathbb{R}^{n_2 \times m_2}$ is the matrix $C = A \otimes B$ of size $(n_1 n_2) \times (m_1 m_2)$ with the block form
$$A \otimes B = [a_{ij} B].$$
## Main property of the Kronecker product and vec
We have
$$\mathrm{vec}(A X B^{\top}) = (B \otimes A) \mathrm{vec}(X).$$
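A quick numerical check of this identity; note that the mathematical $\mathrm{vec}$ stacks columns, which corresponds to `order='F'` in NumPy.

```python
import numpy as np

np.random.seed(4)
A = np.random.randn(3, 4)
X = np.random.randn(4, 5)
B = np.random.randn(6, 5)

vec = lambda M: M.flatten(order='F')          # column-wise stacking

lhs = vec(A @ X @ B.T)
rhs = np.kron(B, A) @ vec(X)
print(np.allclose(lhs, rhs))                  # True
```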
## Rewriting the Sylvester equation
$$\mathrm{vec}(A X B^{\top}) = (B \otimes A) \mathrm{vec}(X).$$
- We can use it to rewrite the Sylvester equation
$$ A X + X B = C $$
in the form
$$\mathrm{vec}(AX + X B) = (I \otimes A + B^{\top} \otimes I) \mathrm{vec}(X) = \mathrm{vec}(C).$$
- Thus, we need to solve a linear system with the matrix
$$(I \otimes A + B^{\top} \otimes I)$$
- For $m = n$ it is a matrix of size $n^2 \times n^2$, so straightforward Gaussian elimination would take $\mathcal{O}(n^6)$ operations.
- We can do it in $\mathcal{O}(n^3)$ operations!
## Solving Sylvester equation: Bartels-Stewart method
$$(I \otimes A + B^{\top} \otimes I) x = c.$$
Let us compute the Schur decompositions of $A$ and $B^{\top}$:
$$A = Q_A T_A Q^*_A, \quad B^{\top} = Q_B T_B Q^*_B.$$
Then, we have
$$(I \otimes A + B^{\top} \otimes I) = I \otimes (Q_A T_A Q^*_A) + (Q_B T_B Q^*_B) \otimes I = (Q_B \otimes Q_A) ( I \otimes T_A + T_B \otimes I) (Q^*_B \otimes Q^*_A). $$
We have
$$(Q_B \otimes Q_A)^{-1} = Q^*_B \otimes Q^*_A,$$
thus we only need to solve an auxiliary linear system with the matrix
$$I \otimes T_A + T_B \otimes I.$$
Note that if $A$ and $B$ are Hermitian, then $T_A$ and $T_B$ are diagonal, and this matrix is diagonal!
## Solving a final system
We have the system
$$(I \otimes T_A + T_B \otimes I) z = g,$$
or, in matrix form,
$$T_A Z + Z T^{\top}_B = G.$$
Since $T_A$ and $T_B$ are triangular, writing the equation elementwise shows that the entries can be found successively: $Z_{11}, Z_{21}, \ldots$ SciPy's Sylvester solver (sketched below) follows exactly this scheme.
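A small sketch using SciPy's `solve_sylvester`, which implements the Bartels-Stewart algorithm described above:

```python
import numpy as np
from scipy.linalg import solve_sylvester

np.random.seed(5)
n, m = 60, 40
A = np.random.randn(n, n)
B = np.random.randn(m, m)
C = np.random.randn(n, m)

# Solves A X + X B = C via Schur decompositions of A and B (Bartels-Stewart)
X = solve_sylvester(A, B, C)
print(np.linalg.norm(A @ X + X @ B - C) / np.linalg.norm(C))   # small residual
```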
## Take home message
- Matrix functions: matrix exponential, methods to compute matrix exponential, other matrix functions
- Matrix equations (Lyapunov equations, Sylvester equation)
## Plan for the next class
- Iterative methods for large scale eigenvalue problems
- Intro to streaming algorithms
```
from IPython.core.display import HTML
def css_styling():
styles = open("./styles/custom.css", "r").read()
return HTML(styles)
css_styling()
```
# Kalman Filter
Kalman filters are linear models for state estimation of dynamic systems [1]. They have been the *de facto* standard in many robotics and tracking/prediction applications because they are well suited to systems with uncertainty about an observable dynamic process. They use an "observe, predict, correct" paradigm to extract information from an otherwise noisy signal. In Pyro, we can build differentiable Kalman filters with learnable parameters using the `pyro.contrib.tracking` [library](http://docs.pyro.ai/en/dev/contrib.tracking.html#module-pyro.contrib.tracking.extended_kalman_filter).
## Dynamic process
To start, consider this simple motion model:
$$ X_{k+1} = FX_k + \mathbf{W}_k $$
$$ \mathbf{Z}_k = HX_k + \mathbf{V}_k $$
where $k$ is the timestep, $X_k$ is the state (the signal estimate), $\mathbf{Z}_k$ is the observed value at timestep $k$, and $\mathbf{W}_k$ and $\mathbf{V}_k$ are independent noise processes (i.e. $\mathbb{E}[\mathbf{W}_k \mathbf{V}_j^T] = 0$ for all $j, k$) which we'll approximate as Gaussians. Note that the state transitions are linear.
## Kalman Update
At each time step, we perform a prediction for the mean and covariance:
$$ \hat{X}_k = F\hat{X}_{k-1}$$
$$\hat{P}_k = FP_{k-1}F^T + Q$$
and a correction for the measurement:
$$ K_k = \hat{P}_k H^T(H\hat{P}_k H^T + R)^{-1}$$
$$ X_k = \hat{X}_k + K_k(z_k - H\hat{X}_k)$$
$$ P_k = (I-K_k H)\hat{P}_k$$
where $X$ is the state estimate, $P$ is the state covariance matrix, $K$ is the Kalman gain, and $Q$ and $R$ are the process and measurement noise covariance matrices.
For an in-depth derivation, see \[1\].
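Before moving on, here is a minimal NumPy sketch of one predict/correct cycle for a toy 1-D constant-velocity model; the matrices `F`, `H`, `Q`, `R` and the measurement below are illustrative choices, not values used later in this notebook.

```python
import numpy as np

dt = 1.0
F = np.array([[1.0, dt], [0.0, 1.0]])     # state transition (position, velocity)
H = np.array([[1.0, 0.0]])                # we only measure position
Q = 0.01 * np.eye(2)                      # process noise covariance
R = np.array([[0.5]])                     # measurement noise covariance

x = np.array([[0.0], [1.0]])              # current state estimate
P = np.eye(2)                             # current state covariance
z = np.array([[1.2]])                     # a (made-up) position measurement

# Predict
x_pred = F @ x
P_pred = F @ P @ F.T + Q

# Correct
K = P_pred @ H.T @ np.linalg.inv(H @ P_pred @ H.T + R)   # Kalman gain
x = x_pred + K @ (z - H @ x_pred)
P = (np.eye(2) - K @ H) @ P_pred
print(x.ravel(), np.diag(P))
```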
## Nonlinear Estimation: Extended Kalman Filter
What if our system is non-linear, e.g. in GPS navigation? Consider the following non-linear system:
$$ X_{k+1} = \mathbf{f}(X_k) + \mathbf{W}_k $$
$$ \mathbf{Z}_k = \mathbf{h}(X_k) + \mathbf{V}_k $$
Notice that $\mathbf{f}$ and $\mathbf{h}$ are now (smooth) non-linear functions.
The Extended Kalman Filter (EKF) attacks this problem by using a local linearization of the Kalman filter via a [Taylor series expansion](https://en.wikipedia.org/wiki/Taylor_series).
$$ f(X_k, k) \approx f(x_k^R, k) + \mathbf{H}_k(X_k - x_k^R) + \cdots$$
where $\mathbf{H}_k$ is the Jacobian matrix at time $k$, $x_k^R$ is the previous optimal estimate, and we ignore the higher-order terms. At each time step, we compute a Jacobian conditioned on the previous predictions (this computation is handled by Pyro under the hood), and use the result to perform a prediction and update.
Omitting the derivations, the modification to the above predictions are now:
$$ \hat{X}_k \approx \mathbf{f}(X_{k-1}^R)$$
$$ \hat{P}_k = \mathbf{H}_\mathbf{f}(X_{k-1})P_{k-1}\mathbf{H}_\mathbf{f}^T(X_{k-1}) + Q$$
and the updates are now:
$$ X_k \approx \hat{X}_k + K_k\big(z_k - \mathbf{h}(\hat{X}_k)\big)$$
$$ K_k = \hat{P}_k \mathbf{H}_\mathbf{h}^T(\hat{X}_k) \Big(\mathbf{H}_\mathbf{h}(\hat{X}_k)\hat{P}_k \mathbf{H}_\mathbf{h}^T(\hat{X}_k) + R_k\Big)^{-1} $$
$$ P_k = \big(I - K_k \mathbf{H}_\mathbf{h}(\hat{X}_k)\big)\hat{P}_k$$
In Pyro, all we need to do is create an `EKFState` object and use its `predict` and `update` methods. Pyro will do exact inference to compute the innovations and we will use SVI to learn a MAP estimate of the position and measurement covariances.
As an example, let's look at an object moving at near-constant velocity in 2-D in a discrete time space over 100 time steps.
```
import os
import math
import torch
import pyro
import pyro.distributions as dist
from pyro.infer.autoguide import AutoDelta
from pyro.optim import Adam
from pyro.infer import SVI, Trace_ELBO, config_enumerate
from pyro.contrib.tracking.extended_kalman_filter import EKFState
from pyro.contrib.tracking.distributions import EKFDistribution
from pyro.contrib.tracking.dynamic_models import NcvContinuous
from pyro.contrib.tracking.measurements import PositionMeasurement
smoke_test = ('CI' in os.environ)
assert pyro.__version__.startswith('0.5.0')
pyro.enable_validation(True)
dt = 1e-2
num_frames = 10
dim = 4
# Continuous model
ncv = NcvContinuous(dim, 2.0)
# Truth trajectory
xs_truth = torch.zeros(num_frames, dim)
# initial direction
theta0_truth = 0.0
# initial state
with torch.no_grad():
xs_truth[0, :] = torch.tensor([0.0, 0.0, math.cos(theta0_truth), math.sin(theta0_truth)])
for frame_num in range(1, num_frames):
# sample independent process noise
dx = pyro.sample('process_noise_{}'.format(frame_num), ncv.process_noise_dist(dt))
xs_truth[frame_num, :] = ncv(xs_truth[frame_num-1, :], dt=dt) + dx
```
Next, let's specify the measurements. Notice that we only measure the positions of the particle.
```
# Measurements
measurements = []
mean = torch.zeros(2)
# no correlations
cov = 1e-5 * torch.eye(2)
with torch.no_grad():
# sample independent measurement noise
dzs = pyro.sample('dzs', dist.MultivariateNormal(mean, cov).expand((num_frames,)))
# compute measurement means
zs = xs_truth[:, :2] + dzs
```
We'll use a [Delta autoguide](http://docs.pyro.ai/en/dev/infer.autoguide.html#autodelta) to learn MAP estimates of the position and measurement covariances. The `EKFDistribution` computes the joint log density of all of the EKF states given a tensor of sequential measurements.
```
def model(data):
# a HalfNormal can be used here as well
R = pyro.sample('pv_cov', dist.HalfCauchy(2e-6)) * torch.eye(4)
Q = pyro.sample('measurement_cov', dist.HalfCauchy(1e-6)) * torch.eye(2)
# observe the measurements
pyro.sample('track_{}'.format(i), EKFDistribution(xs_truth[0], R, ncv,
Q, time_steps=num_frames),
obs=data)
guide = AutoDelta(model) # MAP estimation
optim = pyro.optim.Adam({'lr': 2e-2})
svi = SVI(model, guide, optim, loss=Trace_ELBO(retain_graph=True))
pyro.set_rng_seed(0)
pyro.clear_param_store()
for i in range(250 if not smoke_test else 2):
loss = svi.step(zs)
if not i % 10:
print('loss: ', loss)
# retrieve states for visualization
R = guide()['pv_cov'] * torch.eye(4)
Q = guide()['measurement_cov'] * torch.eye(2)
ekf_dist = EKFDistribution(xs_truth[0], R, ncv, Q, time_steps=num_frames)
states= ekf_dist.filter_states(zs)
```
<a href="https://colab.research.google.com/github/saketkc/pyFLGLM/blob/master/Chapters/07_Chapter07.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Chapter 7 - Models for Count Data
```
!pip install proplot
import warnings
import pandas as pd
import proplot as plot
import seaborn as sns
import statsmodels
import statsmodels.api as sm
import statsmodels.formula.api as smf
from patsy import dmatrices
from scipy import stats
warnings.filterwarnings("ignore")
%pylab inline
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["font.weight"] = "bold"
cancer_df = pd.read_csv("https://github.com/saketkc/pyFLGLM/blob/master/data/Cancer.tsv.gz?raw=true", compression="gzip", sep="\t")
cancer_df.head()
cancer_df["logrisktime"] = np.log(cancer_df["risktime"])
formula = """count ~ C(histology) + C(stage) + C(time)"""
response, predictors = dmatrices(formula, cancer_df, return_type="dataframe")
fit = sm.GLM(
response, predictors, family=sm.families.Poisson(link=sm.families.links.log()),
offset=cancer_df["logrisktime"]
).fit()
print(fit.summary())
```
The increasing coefficients with stage reflect the higher mortality at later stages: stage 3 mortality rates are $\exp(1.324) \approx 3.76$ times higher than stage 1.
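As an illustrative aside (not part of the original notebook, and assuming the `fit` object from the cell above), the rate ratios and their 95% confidence intervals can be read directly off the fitted Poisson GLM; the parameter labels (e.g. `C(stage)[T.2]`) follow patsy's default treatment coding.

```python
import numpy as np
import pandas as pd

ci = fit.conf_int()                        # 95% CIs on the log-rate scale
rate_ratios = pd.DataFrame({
    "rate_ratio": np.exp(fit.params),      # exponentiate coefficients to get rate ratios
    "ci_low": np.exp(ci[0]),
    "ci_high": np.exp(ci[1]),
})
print(rate_ratios)
```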
```
drugs_df = pd.read_csv("https://github.com/saketkc/pyFLGLM/blob/master/data/Drugs.tsv.gz?raw=true", compression="gzip", sep="\t")
drugs_df = drugs_df.rename(columns={"A": "alc", "C": "cig", "M": "mar"})
drugs_df
formula = """count ~ C(alc) + C(cig) + C(mar)"""
response, predictors = dmatrices(formula, drugs_df, return_type="dataframe")
mutual_indep = sm.GLM(
response, predictors, family=sm.families.Poisson(link=sm.families.links.log())).fit()
print(mutual_indep.summary())
l = ["yes", "no"]
formula = """count ~ C(alc, levels=l) + C(cig, levels=l) + C(mar, levels=l) + C(alc, levels=l):C(cig, levels=l) + C(alc, levels=l):C(mar,levels=l) + C(cig,levels=l):C(mar,levels=l)"""
response, predictors = dmatrices(formula, drugs_df, return_type="dataframe")
homo_association = sm.GLM(
response, predictors, family=sm.families.Poisson(link=sm.families.links.log())).fit()
print(homo_association.summary())
print('AIC: {}'.format(homo_association.aic))
pearson_resid = homo_association.resid_pearson
std_resid = homo_association.resid_response
print(np.sum(pearson_resid**2))
counts = drugs_df["count"]
df = pd.DataFrame( np.vstack([counts.values,
homo_association.fittedvalues,
homo_association.resid_pearson,
homo_association.resid_response])).T
df.columns = ["count", "fitted", "pearsonr_resid", "std_resid"]
df
drugs2_df = pd.read_csv("https://github.com/saketkc/pyFLGLM/blob/master/data/Drugs2.tsv.gz?raw=true", compression="gzip", sep="\t")
drugs2_df = drugs2_df.rename(columns={"A": "alc", "C": "cig"})
drugs2_df["M_yes_byn"] = drugs2_df["M_yes"]/drugs2_df["n"]
l = ["yes", "no"]
#formula = """M_yes/n ~ C(alc, levels=l) + C(cig, levels=l)"""
#formula = """I(M_yes/n) ~ C(alc) + C(cig)"""
formula = """M_yes_byn ~ C(alc) + C(cig)"""
response, predictors = dmatrices(formula, drugs2_df, return_type="dataframe")
fit = sm.GLM(response,
predictors,
family=sm.families.Binomial(link=sm.families.links.logit()),
var_weights=drugs2_df["n"]).fit()
print(fit.summary())
```
### Section 7.5.1
```
crabs_df = pd.read_csv("https://github.com/saketkc/pyFLGLM/blob/master/data/Crabs.tsv.gz?raw=true", compression="gzip", sep="\t")
crabs_df.head()
formula = """y ~ 1"""
response, predictors = dmatrices(formula, crabs_df, return_type="dataframe")
fit = sm.GLM(response,
predictors,
family=sm.families.Poisson(link=sm.families.links.log())).fit()
print(fit.summary())
formula = """y ~ 1"""
response, predictors = dmatrices(formula, crabs_df, return_type="dataframe")
fit = sm.GLM(response,
predictors,
family=sm.families.NegativeBinomial(link=sm.families.links.log())).fit(scale='x2')
# (crabs_df["y"].var()-crabs_df["y"].mean())/(crabs_df["y"].mean()**2)#- fit.mu
overdispersion = fit.pearson_chi2 / fit.df_resid
print(fit.summary())
print('Overdispersion: {}'.format(overdispersion))
import statsmodels.discrete.count_model as cm
formula = """y ~ 1"""
response, predictors = dmatrices(formula, crabs_df, return_type="dataframe")
fit = cm.ZeroInflatedPoisson(response,
predictors).fit()
print(fit.summary())
import statsmodels.discrete.count_model as cm
formula = """y ~ 1"""
response, predictors = dmatrices(formula, crabs_df, return_type="dataframe")
fit = cm.ZeroInflatedNegativeBinomialP(response,
predictors,
p=2).fit()
print(fit.summary())
formula = """y ~ weight + color"""
response, predictors = dmatrices(formula, crabs_df, return_type="dataframe")
fit = cm.ZeroInflatedNegativeBinomialP(response,
predictors).fit()
print(fit.summary())
formula = """y ~ weight + color"""
response, predictors = dmatrices(formula, crabs_df, return_type="dataframe")
fit = sm.GLM(response,
predictors,
family=sm.families.NegativeBinomial(link=sm.families.links.log())).fit(scale='x2')
print(fit.summary())
```
# Support Vector Machines
In this notebook, we will build a Support Vector Machine (SVM) that finds the optimal hyperplane maximizing the margin between two toy data classes using gradient descent. An SVM is a supervised machine learning algorithm that can be used for both classification and regression problems, but it is usually used for classification. Given two or more labeled classes of data, it acts as a discriminative classifier, formally defined by an optimal hyperplane that separates the classes. New examples mapped into that same space can then be categorized based on which side of the gap they fall on.
Support vectors are the data points nearest to the hyperplane: the points of a data set that, if removed, would alter the position of the dividing hyperplane. Because of this, they can be considered the critical elements of a data set; they are what help us build our SVM.
A hyperplane is a linear decision surface that splits the space into two parts, so it naturally defines a binary classifier. Geometry tells us that a hyperplane is a subspace of one dimension less than its ambient space: a hyperplane of an n-dimensional space is a flat subset of dimension n − 1. By its nature, it separates the space into two half-spaces.
## Generating Toy Data
```
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs  # samples_generator was removed in newer scikit-learn
# Generate toy data that has two distinct classes and a huge gap between them
X, Y = make_blobs(n_samples=500, centers=2, random_state=0, cluster_std=0.4) # X - features, Y - labels
# Plot the toy data
plt.scatter(x=X[:, 0], y=X[:, 1])
```
## Creating our Support Vector Machine
We will be using PyTorch to create our SVM.
```
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
class SVM(nn.Module):
"""
Linear Support Vector Machine
-----------------------------
This SVM is a subclass of the PyTorch nn module that
implements the Linear function. The size of each
input sample is 2 and output sample is 1.
"""
def __init__(self):
super().__init__() # Call the init function of nn.Module
self.fully_connected = nn.Linear(2, 1) # Implement the Linear function
def forward(self, x):
fwd = self.fully_connected(x) # Forward pass
return fwd
```
Our Support Vector Machine (SVM) is a subclass of the `nn.Module` class, and to initialize our SVM we call the base class' `__init__` function. Our `forward` function applies a linear transformation to the incoming data: *y = Ax + b* (a quick numerical check is sketched below).
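As a quick sanity check (an illustrative sketch, not part of the original notebook), `nn.Linear` applied to a batch of row vectors computes $y = x A^T + b$, where $A$ is the layer's weight matrix; this is the same affine map written above for column vectors.

```python
import torch
import torch.nn as nn

torch.manual_seed(0)
layer = nn.Linear(2, 1)                      # same shape as the SVM's fully connected layer
x = torch.randn(4, 2)                        # a batch of 4 two-dimensional points

manual = x @ layer.weight.t() + layer.bias   # y = x A^T + b
print(torch.allclose(layer(x), manual))      # True
```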
## Feature Scaling
Feature scaling is a method used to standardize the range of independent variables or features of data. In data processing, it is also known as data normalization and is generally performed during the data preprocessing step. Standardizing the features so that they are centered around 0 with a standard deviation of 1 is not only important if we are comparing measurements that have different units, but it is also a general requirement for many machine learning algorithms.
But why feature scaling?
> The true reason behind scaling features in SVM is the fact, that this classifier is not affine transformation invariant. In other words, if you multiply one feature by a 1000 than a solution given by SVM will be completely different. It has nearly nothing to do with the underlying optimization techniques (although they are affected by these scales problems, they should still converge to global optimum).
> Consider an example: you have man and a woman, encoded by their sex and height (two features). Let us assume a very simple case with such data:
> 0-man, 1-woman
> 1 150
> 1 160
> 1 170
> 0 180
> 0 190
> 0 200
> And let us do something silly. Train it to predict the sex of the person, so we are trying to learn f(x,y)=x (ignoring second parameter).
> It is easy to see, that for such data largest margin classifier will "cut" the plane horizontally somewhere around height "175", so once we get new sample "0 178" (a woman of 178cm height) we get the classification that she is a man.
> However, if we scale down everything to [0,1] we get sth like
> 0 0.0
> 0 0.2
> 0 0.4
> 1 0.6
> 1 0.8
> 1 1.0
> and now largest margin classifier "cuts" the plane nearly vertically (as expected) and so given new sample "0 178" which is also scaled to around "0 0.56" we get that it is a woman (correct!)
Source: *scaling?, W. (2018). Why feature scaling?. [online] Stackoverflow.com. Available at: https://stackoverflow.com/questions/26225344/why-feature-scaling?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa [Accessed 6 Apr. 2018].*
```
data = X # Before feature scaling
X = (X - X.mean())/X.std() # Feature scaling
Y[Y == 0] = -1 # Replace zeros with -1
plt.scatter(x=X[:, 0], y=X[:, 1]) # After feature scaling
plt.scatter(x=data[:, 0], y=data[:, 1], c='r') # Before feature scaling
```
## Training
Now let's go ahead and train our SVM.
```
learning_rate = 0.1 # Learning rate
epoch = 10 # Number of epochs
batch_size = 1 # Batch size
X = torch.FloatTensor(X) # Convert X and Y to FloatTensors
Y = torch.FloatTensor(Y)
N = len(Y) # Number of samples, 500
model = SVM() # Our model
optimizer = optim.SGD(model.parameters(), lr=learning_rate) # Our optimizer
model.train() # Our model, SVM is a subclass of the nn.Module, so it inherits the train method
for epoch in range(epoch):
perm = torch.randperm(N) # Generate a set of random numbers of length: sample size
sum_loss = 0 # Loss for each epoch
for i in range(0, N, batch_size):
x = X[perm[i:i + batch_size]] # Pick random samples by iterating over random permutation
y = Y[perm[i:i + batch_size]] # Pick the correlating class
x = Variable(x) # Convert features and classes to variables
y = Variable(y)
optimizer.zero_grad() # Manually zero the gradient buffers of the optimizer
output = model(x) # Compute the output by doing a forward pass
loss = torch.mean(torch.clamp(1 - output * y, min=0)) # hinge loss
loss.backward() # Backpropagation
optimizer.step() # Optimize and adjust weights
sum_loss += loss.item()  # Add the loss (.item() extracts a Python number from the scalar loss tensor)
print("Epoch {}, Loss: {}".format(epoch, sum_loss))
```
## Results

# Motion Deblur Acquisition Notebook
```
# Magic Functions
%matplotlib notebook
%load_ext autoreload
%autoreload 2
# Misc imports
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from time import sleep
import sys, os, time, datetime
# Platform imports
import os, glob
from os.path import expanduser
# llops imports
from llops import Roi
# Comptic imports
import comptic
# Imports from this library
from htdeblur.acquisition import hardware
from htdeblur import acquisition
import htdeblur as md
# Define micro-manager directory
hardware.mm_directory = r'C:\Program Files\Micro-Manager-2.0beta'  # raw string avoids accidental escape sequences
```
## Create Hardware Controllers
```
# Define COM ports
led_array_port = "COM7"
xy_stage_port = "COM5"
camera_name = 'pco'
# Delete old devices
if 'led_controller' in locals():
led_controller.unload()
if 'pos_controller' in locals():
pos_controller.unload()
if 'cam_controller' in locals():
cam_controller.unload()
# Create LED controller interface
led_controller = hardware.LedArrayController(led_array_port)
# Create position controller interface
pos_controller = hardware.PositionController(xy_stage_port, trigger_mode='hardware')
# Create camera controller object
cam_controller = hardware.CameraController(camera_name, trigger_mode='hardware')
# Set up hardware triggering
cam_controller.trigger_pin = 0
led_controller.camera_trigger_index = 1
# Set up position stage
pos_controller.trigger_pin = 1
led_controller.motion_stage_trigger_index = 1
# Make a list
hardware_controller_list = [led_controller, cam_controller, pos_controller]
# Illuminate the light source as verification
led_controller.clear()
```
## Create data output directory
```
out_dir = 'D:\\Zack\\' + datetime.date.today().strftime("%m-%d-%y") +'-MotionDeblur\\'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
```
# Define System Metadata
```
# Define LED positions using actual distances from sample
array_z_distance_mm = 60.0
# Load quasi-dome positions
led_controller.array_distance = array_z_distance_mm
source_list_cart = np.asarray(led_controller.led_position_list_cart)
source_list_na_design = np.asarray(led_controller.led_position_list_na)
# Define system metadata
sys_metadata_dict = {
'objective' :{'na': .25, 'mag' : 10}, # remember to set system_mag to 2. for B (front port)
'system' : {'mag': 1},
'illumination' : {"device_name" : led_controller.device_name, "spectrum" : {"center" : {'b' : 0.480, "g" : 0.532, "r" : 0.625}},
"z_distance_mm" : array_z_distance_mm, "state_list" : {"design" : source_list_na_design, 'units' : 'na'}, 'bit_depth' : led_controller.bit_depth, "device_type" : led_controller.type},
'position' : {"device_name" : "h117", "state_list" : {'units' : 'mm'}, "device_type" : "xy_stage"}
}
# Create metadata object
sys_metadata = comptic.containers.Metadata(from_dict=sys_metadata_dict)
# Assign wavelengths
sys_metadata.illumination.spectrum.center = led_controller.color_channel_center_wavelengths
# Set up camera
if camera_name == "pco":
sys_metadata.camera.pixel_size_um = 6.5
sys_metadata.camera.is_color = False
sys_metadata.camera.device_name = camera_name
sys_metadata.camera.port = 'side'
sys_metadata.system.mag = 1.0
# Geometric transforms
cam_controller.transpose = False
cam_controller.flip_x = True
cam_controller.flip_y = False
elif camera_name == "pco_color":
sys_metadata.camera.pixel_size_um = 6.5 * 2
sys_metadata.camera.is_color = True
sys_metadata.camera.device_name = 'pco'
sys_metadata.camera.port = 'front'
sys_metadata.system.mag = 1.934
# Geometric transforms
cam_controller.transpose = False
cam_controller.flip_x = True
cam_controller.flip_y = False
cam_controller.bayer_coupling_matrix = sys_metadata.camera.bayer_coupling_matrix
cam_controller.is_color = True
elif camera_name == 'optimos':
sys_metadata.camera.pixel_size_um = 4.54
sys_metadata.camera.is_color = True
sys_metadata.camera.bayer_coupling_matrix = bayer_coupling_matrix
sys_metadata.system.mag = 2.0
sys_metadata.camera.port = 'front'
sys_metadata.camera.device_name = camera_name
# Geometric transforms
cam_controller.transpose = False
cam_controller.flip_x = False
cam_controller.flip_y = False
cam_controller.bayer_coupling_matrix = sys_metadata.camera.bayer_coupling_matrix
cam_controller.is_color = True
elif camera_name == 'retiga':
sys_metadata.camera.pixel_size_um = 4.54
sys_metadata.camera.is_color = False
sys_metadata.system.mag = 1.0
sys_metadata.camera.port = 'side'
sys_metadata.camera.device_name = camera_name
# Geometric transforms
cam_controller.transpose = False
cam_controller.flip_x = False
cam_controller.flip_y = False
cam_controller.bayer_coupling_matrix = None
cam_controller.is_color = False
else:
raise ValueError("No camera found!")
# Calculate effective pixel size
sys_metadata.system.eff_pixel_size_um = sys_metadata.camera.pixel_size_um / (sys_metadata.objective.mag * sys_metadata.system.mag)
# Print system metadata
print(sys_metadata)
# Set up led array hardware to use this metadata
led_controller.na = sys_metadata.objective.na
led_controller.array_distance = sys_metadata.illumination.z_distance_mm
# Set up joystick
pos_controller.setJoystickFlip(True, False) # up is +y, right is +x
# Print information about sampling
sys_metadata.checkSampling()
# Display brightfield pattern for alignment
led_controller.bf()
```
# Live Viewer
Things to do here:
- Ensure the camera doesn't saturate (you want around 40k-50k counts on average)
- Ensure motion is linear and not at an angle (use the top of a window to align)
```
# Illuminate
led_controller.bf()
# Exposure
exposure_time_ms = 1
# Start live view
cam_controller.liveView(figsize=(10,10), zoom_factor=4, contrast_type='fit')
# pos_controller.zero()
```
## Set up Acquisition
```
# Reload devices
pos_controller.reload()
led_controller.reload()
# Zero position
pos_controller.zero()
# Define dataset name
sys_metadata.sample.name = 'beads_test'
# Define sample size (in mm)
sample_size_0 = (4, 4) #(22, 28)
# Overlap parameter (1 = no overlap)
overlap_factor = 0.8 # 20% overlap of frames
# Calculate FOV
fov = cam_controller.fov(sys_metadata.system.mag * sys_metadata.objective.mag)
# Round up sample size
sys_metadata.sample.size_mm = np.ceil(np.asarray(sample_size_0) / np.asarray((overlap_factor * fov[0], overlap_factor * fov[1]))) * np.asarray((overlap_factor * fov[0], overlap_factor * fov[1]))
```
# Acquisition
# Background Image Capture
This step captures the background (dust in the system without the sample) as well as the dark current (signal when illumination is off).
```
# pos_controller.acceleration = 1e2
pos_controller.velocity = 30
n_avg = 3
sample_free_offset = (0, -25)
exposure_bg = 0.1
# Go to offset position and capture background
pos_controller.goToPosition(sample_free_offset, blocking=True)
cam_controller.setExposure(exposure_bg)
led_controller.bf()
time.sleep(1)
img_bg = cam_controller.snap()
# Turn off Illumination
led_controller.clear()
# Capture dark current image
img_dc = cam_controller.snap()
# Perform averaging
for _ in range(n_avg - 1):
time.sleep(0.1)
img_dc += cam_controller.snap()
# Normalize
img_dc = img_dc / n_avg
# Go back home
pos_controller.goToPosition((0,0))
plt.figure(figsize=(12,4))
plt.subplot(121)
plt.imshow(img_bg)
plt.axis('off')
plt.title('Background, Mean = %g' % np.mean(img_bg))
plt.subplot(122)
plt.imshow(img_dc)
plt.axis('off')
plt.title('Dark Current, Mean = %g' % np.mean(img_dc))
```
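These calibration frames are commonly used for a flat-field correction of each raw frame; the helper below is only an illustrative sketch (the normalization actually applied by `htdeblur`'s processing step may differ).

```python
import numpy as np

def flat_field_correct(raw_frame, background, dark_current, eps=1e-6):
    """Illustrative flat-field correction using the calibration frames captured above."""
    flat = np.clip(background - dark_current, eps, None)     # illumination profile (dust, vignetting)
    corrected = (raw_frame - dark_current) / flat            # remove dark offset and divide out the profile
    return corrected * np.mean(flat)                         # rescale to roughly the original count level

# Example usage (assumes img_bg and img_dc from the cell above, and some raw frame `frame`):
# frame_corrected = flat_field_correct(frame, img_bg, img_dc)
```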
## Stop and Stare Acquisition
This step performs a stop-and-stare acquisition.
```
# Create Acquisition Object
sns_acq = acquisition.StopAndStareAcquisition(hardware_controller_list,
sys_metadata,
frame_spacing_mm=(overlap_factor * fov[0], overlap_factor * fov[1]), # y,x
object_size_mm=sys_metadata.sample.size_mm, # y,x
settle_time_s=0.1)
# Acquire
dataset_sns = sns_acq.acquire(exposure_time_ms=100)
# Save background and dark current
dataset_sns.background = img_bg
dataset_sns.dark_current = img_dc
# Reset Stage
pos_controller.goToPosition((0,0))
# Save
meta = dataset_sns.save(out_dir, header=sys_metadata.sample.name + '_stopandstare', bit_depth=16)
plt.figure()
plt.imshow(dataset_sns.frame_list[2])
# plt.clim((0,1000))
```
## Motion Deblur Acquisition Set-up
This cell configures and resets the hardware in advance of an acquisition.
```
# Re-initialize position controller
pos_controller.reload()
pos_controller.goToPositionRapid((0,0), blocking=True)
pos_controller.acceleration = 8e2
# Derivative of acceleration
pos_controller.jerk = 7423223
# Trigger pulse width
pos_controller.trigger_pulse_width_us = 2000
# Re-initialize led controller
led_controller.reload()
led_controller.bf()
led_controller.command_debug = False
# Stepper motor speed
pos_controller.command('SS 1')
# Disable encoders
pos_controller.command('ENCODER X 0')
pos_controller.command('ENCODER Y 0')
# Reset
# led_controller.trigger_frame_time_s = [0, 0]
```
## Motion Deblur Acquisition
```
# Generate acquisition object using options above
md_acq = acquisition.MotionDeblurAcquisition(hardware_controller_list,
sys_metadata,
frame_spacing_mm=(overlap_factor * fov[0], overlap_factor * fov[1]), # x,y
object_size_mm=sys_metadata.sample.size_mm, # x,y
saturation_factor=0.5, #1/8
extra_run_up_time_s=0.1,
kernel_pulse_count=50,
velocity_mm_s=25,
illumination_sequence_count=1,
motion_path_type="raster",
blur_vector_method="coded",
use_l1_distance_for_motion_calculations=True,
segment_delay_s=0.0)
# Acquire
dataset = md_acq.acquire(reset_devices=False)
# Save background and dark current
dataset.background = img_bg
dataset.dark_current = img_dc
# Save results
meta = dataset.save(out_dir,
header=sys_metadata.sample.name + '_' + md_acq.blur_vector_method + '_' + md_acq.motion_path_type + '_' + str(md_acq.kernel_pulse_count))
plt.figure()
plt.imshow(dataset.frame_list[2])
plt.clim((0,1000))
```
## Check Registration
```
dataset.frame_segment_list = dataset.frame_segment_list_full
# Register frames
import comptic
frame_offsets = comptic.registration.register_roi_list(dataset.frame_list,
dataset.roi_list,
debug=False,
replace_untrusted=False)
# Convert to mm
frame_offsets_mm = [[coord * dataset.metadata.system.eff_pixel_size_um / 1000 for coord in offset] for offset in frame_offsets]
# Calculate existing centers
frame_centers_mm = [[coord * dataset.metadata.system.eff_pixel_size_um / 1000 for coord in roi.center] for roi in dataset.roi_list]
# Apply correction
frame_centers_corrected_mm = [(center[0] + offset[0], center[1] + offset[1]) for center, offset in zip(frame_centers_mm, frame_offsets_mm)]
# Show correction
plt.figure()
plt.scatter(np.asarray(frame_centers_corrected_mm)[:,1], np.asarray(frame_centers_corrected_mm)[:,0], label='Corrected')
plt.scatter(np.asarray(frame_centers_mm)[:,1], np.asarray(frame_centers_mm)[:,0], label='Predicted')
plt.axis('equal')
plt.legend()
plt.xlabel('Position (mm)')
plt.ylabel('Position (mm)')
```
## Processing
```
# Expand frame_state_list
dataset.fixOldMdDatasets()
import llops as yp
# yp.config.setDefaultBackend('arrayfire')
yp.config.setDefaultBackend('numpy')
# Perform registration
dataset.register(force=True, frame_offset=-22, segment_offset=(15, -290), debug=False,
frame_registration_mode='xc', segment_registration_mode='xc')
# Perform normalization
dataset.normalize(force=True)
# Create recon object
recon = md.recon.Reconstruction(dataset, alpha_blend_distance=1000, pad_mode=0, use_psf=False)
# # # Perform reconstruction
# recon.reconstruct(iteration_count=-1, step_size=1, mode='static', reg_types={})
# # Show result
# recon.show()
# Perform reconstruction
recon.reconstruct(iteration_count=-1, step_size=1, mode='single', frame_number=0, reg_types={})
# Show result
recon.show()
plt.clim(400,5000)
```
**This notebook is an exercise in the [Introduction to Machine Learning](https://www.kaggle.com/learn/intro-to-machine-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/machine-learning-competitions).**
---
# Introduction
In this exercise, you will create and submit predictions for a Kaggle competition. You can then improve your model (e.g. by adding features) to apply what you've learned and move up the leaderboard.
Begin by running the code cell below to set up code checking and the filepaths for the dataset.
```
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.machine_learning.ex7 import *
# Set up filepaths
import os
if not os.path.exists("../input/train.csv"):
os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
```
Here's some of the code you've written so far. Start by running it again.
```
# Import helpful libraries
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
# Load the data, and separate the target
iowa_file_path = '../input/train.csv'
home_data = pd.read_csv(iowa_file_path)
y = home_data.SalePrice
# Create X (After completing the exercise, you can return to modify this line!)
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
# Select columns corresponding to features, and preview the data
X = home_data[features]
X.head()
# Split into validation and training data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
# Define a random forest model
rf_model = RandomForestRegressor(random_state=1)
rf_model.fit(train_X, train_y)
rf_val_predictions = rf_model.predict(val_X)
rf_val_mae = mean_absolute_error(rf_val_predictions, val_y)
print("Validation MAE for Random Forest Model: {:,.0f}".format(rf_val_mae))
```
# Train a model for the competition
The code cell above trains a Random Forest model on **`train_X`** and **`train_y`**.
Use the code cell below to build a Random Forest model and train it on all of **`X`** and **`y`**.
```
# To improve accuracy, create a new Random Forest model which you will train on all training data
rf_model_on_full_data = RandomForestRegressor(random_state=1)
# fit rf_model_on_full_data on all data from the training data
rf_model_on_full_data.fit(X, y)
```
Now, read the file of "test" data, and apply your model to make predictions.
```
# path to file you will use for predictions
test_data_path = '../input/test.csv'
# read test data file using pandas
test_data = pd.read_csv(test_data_path)
# create test_X which comes from test_data but includes only the columns you used for prediction.
# The list of columns is stored in a variable called features
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
test_X = test_data[features]
# make predictions which we will submit.
test_preds = rf_model_on_full_data.predict(test_X)
```
Before submitting, run a check to make sure your `test_preds` have the right format.
```
# Check your answer (To get credit for completing the exercise, you must get a "Correct" result!)
step_1.check()
# step_1.solution()
```
# Generate a submission
Run the code cell below to generate a CSV file with your predictions that you can use to submit to the competition.
```
# Run the code to save predictions in the format used for competition scoring
output = pd.DataFrame({'Id': test_data.Id,
'SalePrice': test_preds})
output.to_csv('submission.csv', index=False)
```
# Submit to the competition
To test your results, you'll need to join the competition (if you haven't already). So open a new window by clicking on **[this link](https://www.kaggle.com/c/home-data-for-ml-course)**. Then click on the **Join Competition** button.

Next, follow the instructions below:
1. Begin by clicking on the **Save Version** button in the top right corner of the window. This will generate a pop-up window.
2. Ensure that the **Save and Run All** option is selected, and then click on the **Save** button.
3. This generates a window in the bottom left corner of the notebook. After it has finished running, click on the number to the right of the **Save Version** button. This pulls up a list of versions on the right of the screen. Click on the ellipsis **(...)** to the right of the most recent version, and select **Open in Viewer**. This brings you into view mode of the same page. You will need to scroll down to get back to these instructions.
4. Click on the **Output** tab on the right of the screen. Then, click on the file you would like to submit, and click on the blue **Submit** button to submit your results to the leaderboard.
You have now successfully submitted to the competition!
If you want to keep working to improve your performance, select the **Edit** button in the top right of the screen. Then you can change your code and repeat the process. There's a lot of room to improve, and you will climb up the leaderboard as you work.
# Continue Your Progress
There are many ways to improve your model, and **experimenting is a great way to learn at this point.**
The best way to improve your model is to add features. To add more features to the data, revisit the first code cell, and change this line of code to include more column names:
```python
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
```
Some features will cause errors because of issues like missing values or non-numeric data types. Here is a complete list of potential columns that you might like to use, and that won't throw errors:
- 'MSSubClass'
- 'LotArea'
- 'OverallQual'
- 'OverallCond'
- 'YearBuilt'
- 'YearRemodAdd'
- 'BsmtFinSF1'
- 'BsmtFinSF2'
- 'BsmtUnfSF'
- 'TotalBsmtSF'
- '1stFlrSF'
- '2ndFlrSF'
- 'LowQualFinSF'
- 'GrLivArea'
- 'BsmtFullBath'
- 'BsmtHalfBath'
- 'FullBath'
- 'HalfBath'
- 'BedroomAbvGr'
- 'KitchenAbvGr'
- 'TotRmsAbvGrd'
- 'Fireplaces'
- 'GarageCars'
- 'GarageArea'
- 'WoodDeckSF'
- 'OpenPorchSF'
- 'EnclosedPorch'
- '3SsnPorch'
- 'ScreenPorch'
- 'PoolArea'
- 'MiscVal'
- 'MoSold'
- 'YrSold'
Look at the list of columns and think about what might affect home prices. To learn more about each of these features, take a look at the data description on the **[competition page](https://www.kaggle.com/c/home-data-for-ml-course/data)**.
After updating the code cell above that defines the features, re-run all of the code cells to evaluate the model and generate a new submission file.
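For illustration, here is one possible expanded feature set drawn from the list above, together with a quick validation comparison; the particular columns chosen here are just an example, not the course's answer:
```python
# Example: a larger feature set (all columns taken from the error-free list above)
more_features = ['LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd',
                 'TotalBsmtSF', 'GrLivArea', 'FullBath', 'TotRmsAbvGrd', 'GarageCars']
X_more = home_data[more_features]

train_X2, val_X2, train_y2, val_y2 = train_test_split(X_more, y, random_state=1)
rf_more = RandomForestRegressor(random_state=1)
rf_more.fit(train_X2, train_y2)
val_mae_more = mean_absolute_error(val_y2, rf_more.predict(val_X2))
print("Validation MAE with expanded features: {:,.0f}".format(val_mae_more))
```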
# What's next?
As mentioned above, some of the features will throw an error if you try to use them to train your model. The **[Intermediate Machine Learning](https://www.kaggle.com/learn/intermediate-machine-learning)** course will teach you how to handle these types of features. You will also learn to use **xgboost**, a gradient-boosting technique that often achieves better accuracy than a Random Forest.
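As a preview of that course, a gradient-boosted model can be swapped in with only a few changes, assuming the `xgboost` package is installed (the settings below are illustrative, not tuned):
```python
# Hypothetical preview: gradient boosting on the same features (not part of this exercise)
from xgboost import XGBRegressor

xgb_model = XGBRegressor(n_estimators=500, learning_rate=0.05, random_state=1)
xgb_model.fit(train_X, train_y)
xgb_val_mae = mean_absolute_error(val_y, xgb_model.predict(val_X))
print("Validation MAE for XGBoost Model: {:,.0f}".format(xgb_val_mae))
```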
The **[Pandas](https://kaggle.com/Learn/Pandas)** course will give you the data manipulation skills to quickly go from conceptual idea to implementation in your data science projects.
You are also ready for the **[Deep Learning](https://kaggle.com/Learn/intro-to-Deep-Learning)** course, where you will build models with better-than-human level performance at computer vision tasks.
---
*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161285) to chat with other Learners.*
# Bi-Variate Regression - Effect on REM:nREM Outcome
The purpose of this notebook is to look exclusively at how the different measured variables are related to the nightly ratio of REM to non-REM (nREM) sleep.
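For orientation, the outcome used throughout is the ratio of time spent in REM sleep to time spent in non-REM sleep. The study data already provide this as the `REM/nREM` column, so the snippet below is only an illustrative calculation with made-up stage totals:
```python
# Illustrative only: how a REM:nREM ratio could be formed from nightly stage totals
# (made-up numbers; the study's df_ss already contains a precomputed 'REM/nREM' column)
example_night = {'rem_minutes': 90, 'light_minutes': 240, 'deep_minutes': 70}
rem_to_nrem = example_night['rem_minutes'] / (example_night['light_minutes'] + example_night['deep_minutes'])
print(round(rem_to_nrem, 3))  # ~0.29
```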
```
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import statsmodels.api as sm
```
## Data Import
```
df_py3 = pd.read_csv('/Users/hagenfritz/Projects/utx000/studies/cohort1/processed_data/study_adafruit_sleepstats',parse_dates=True,index_col=0)
df_py3['inc%'] = (df_py3['max'] - df_py3['min'])/df_py3['min']
df_py3['var'].unique()
df_py2 = pd.read_csv('/Users/hagenfritz/Projects/utx000/studies/cohort1/processed_data/study_sensirion_sleepstats',parse_dates=True,index_col=0)
df_py2['inc%'] = (df_py2['max'] - df_py2['min'])/df_py2['min']
df_py2['var'].unique()
df_ss = pd.read_csv('/Users/hagenfritz/Projects/utx000/studies/cohort1/processed_data/study_sleepStagesDay.csv',parse_dates=True,index_col=0)
df_ss.head()
```
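For reference, the `inc%` column computed above is the relative range of each nightly summary, (max - min) / min. A tiny toy example (made-up numbers, not study data) shows what it captures:
```python
# Toy illustration of the inc% definition used above: relative range of a nightly summary
toy = pd.DataFrame({'min': [20.0, 400.0], 'max': [25.0, 900.0]})
toy['inc%'] = (toy['max'] - toy['min']) / toy['min']
print(toy)  # inc% of 0.25 and 1.25, i.e. 25% and 125% above the nightly minimum
```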
## One Variable Regression
```
def plotLinearRegression(X, xlim, Y, ylim, show_plot=True):
    '''
    Inputs:
    - X: array of values corresponding to the independent variable
    - xlim: list of two values specifying the lower and upper bounds of the horizontal axis
    - Y: array of values corresponding to the dependent variable
    - ylim: list of two values specifying the lower and upper bounds of the vertical axis
    - show_plot: whether to draw the scatter plot and fitted line
    Plots a linear regression of the two variables and returns (ax, model) if successful;
    otherwise returns (False, model) when show_plot is False, or (False, False) on failure.
    '''
    sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)}, font_scale=1)
    try:
        if len(X) > 1:
            t = np.arange(len(X))
            X = sm.add_constant(X)
            model = sm.OLS(Y, X).fit()
            predictions = model.predict(X)  # make the predictions by the model
            # Plotting
            if show_plot:
                ## Scatter
                fig, ax = plt.subplots(figsize=(10, 10))
                ax.scatter(X[:, 1], Y, c=t, s=100, cmap='Blues', edgecolor='black')
                ## Line of Best Fit
                ax.plot(X[:, 1], predictions, color='red')
                # R-squared: explained variation divided by total variation
                top = 0
                bot = 0
                for i in range(len(predictions)):
                    top += (predictions[i] - np.mean(Y))**2
                    bot += (Y[i] - np.mean(Y))**2
                rsqrd = top/bot
                ## Formatting Remainder of Axis
                ax.set_xlim(xlim)
                ax.set_ylim(ylim)
                ### Print out the statistics
                loc_x = min(xlim) + 0.01*(max(xlim) - min(xlim))
                loc_y = min(ylim) + 0.01*(max(ylim) - min(ylim))
                ax.text(loc_x, loc_y + 0.1*(max(ylim) - min(ylim)), 'Intercept: ' + str(round(model.params[0], 4)))
                ax.text(loc_x, loc_y + 0.05*(max(ylim) - min(ylim)), 'Slope: ' + str(round(model.params[1], 4)))
                ax.text(loc_x, loc_y, 'r$^2$: ' + str(round(rsqrd, 4)))
                return ax, model
            else:
                return False, model
        else:
            # Not enough points to fit a regression
            return False, False
    except Exception as inst:
        print(inst)
        return False, False
```
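Before applying the function to the study data, a quick smoke test on synthetic numbers (not study data) shows the expected return values; the fake temperatures and ratios below are assumptions for illustration only:
```python
# Smoke test of plotLinearRegression on synthetic data
rng = np.random.default_rng(0)
x_demo = rng.uniform(15, 30, size=40)                   # fake nightly temperatures [C]
y_demo = 0.02 * x_demo + rng.normal(0, 0.05, size=40)   # fake REM/nREM ratios
ax_demo, model_demo = plotLinearRegression(x_demo, [x_demo.min(), x_demo.max()],
                                           y_demo, [y_demo.min(), y_demo.max()])
print(model_demo.params)  # [intercept, slope] of the fitted line
```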
# Python 3
```
results_df = pd.DataFrame()
df = df_py3
#var = 'TVOC'
#low_cutoffs = [0,0,0]
#high_cutoffs = [1000000,150,100]
#var = 'Lux'
#low_cutoffs = [50,200,10]
#high_cutoffs = [1000,1000,1000]
#var = 'T_NO2'
#low_cutoffs = [15,0,0]
#high_cutoffs = [4000,100000,100000]
#var = 'RH_NO2'
#low_cutoffs = [0,0,0]
#high_cutoffs = [4000,1000,1000]
#var = 'Visible'
#low_cutoffs = [100000,0,100000]
#high_cutoffs = [1000000,1000000,1000000]
# Note: this overrides df = df_py3 above; comment out one of the two assignments
# depending on which sensor's variables you want to analyze
df = df_py2
var = 'Temperature [C]'
low_cutoffs = [15,0,0.05]
high_cutoffs = [1000000,150,0.25]
#var = 'Relative Humidity'
#low_cutoffs = [0,0,0]
#high_cutoffs = [1000000,150,1000]
#var = 'CO2'
#low_cutoffs = [1000,0,0]
#high_cutoffs = [1000000,150,2]
#var = 'PM_C_2p5'
#low_cutoffs = [6,0,0]
#high_cutoffs = [1000000,150,200]
#var = 'PM_C_10'
#low_cutoffs = [6,0,0]
#high_cutoffs = [1000000,150,200]
# Masking by variables - still has all participants
df_byvar = df[df['var'] == var]
# Looping through all the statistics - skipping over the ID and var columns
for column in df_byvar.columns:
    if column in ['ID','var','min']:
        continue
    # Looping through all the participants
    overall = pd.DataFrame()
    for pid in df_byvar['ID'].unique():
        dep_vars_ind = df_byvar[df_byvar['ID'] == pid]
        indep_vars_ind = df_ss[df_ss['ID'] == pid]
        combined = pd.concat([dep_vars_ind, indep_vars_ind], join='inner', axis=1)
        # Removing nights where the REM/nREM ratio is not positive
        combined = combined[combined['REM/nREM'] > 0]
        # Removing outliers from summary stats
        if column in ['max','mean','median']:
            combined = combined[combined[column] > low_cutoffs[0]]
            combined = combined[combined[column] < high_cutoffs[0]]
        if column == 'std':
            combined = combined[combined[column] > low_cutoffs[1]]
            combined = combined[combined[column] < high_cutoffs[1]]
        if column == 'inc%':
            combined = combined[combined[column] > low_cutoffs[2]]
            combined = combined[combined[column] < high_cutoffs[2]]
        if len(combined) > 0:
            overall = pd.concat([overall, combined])
    X = overall[column].values
    y = overall['REM/nREM'].values
    ax, model = plotLinearRegression(X, [min(X), max(X)], y, [min(y), max(y)], True)
    try:
        ax.set_title(f'Variable: {var.upper()}; Stat: {column.upper()}; PID: ALL; Number of Nights: {len(overall)}')
        plt.show()
        plt.close()
    except:
        pass
    onevar_df = pd.DataFrame(data={'Variable':[var],'Stat':[column.upper()],'Outcome':['REM/nREM'.upper()],
                                   'x0':[model.params[0]],'x1':[model.params[1]],'No. Points':[len(overall)],'Fit':[model.rsquared]})
    results_df = pd.concat([results_df, onevar_df])
results_df
```
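As a small follow-up once the cell above has run, the bivariate fits can be ranked by explained variance to see which nightly statistic tracks the REM:nREM ratio most closely (a sketch, assuming `results_df` is populated):
```python
# Rank the bivariate fits by R^2 (the 'Fit' column) to surface the strongest associations
results_df.sort_values('Fit', ascending=False).reset_index(drop=True)
```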