path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
72107191/cell_43 | [
"text_plain_output_1.png"
] | import glob
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
# read each district's engagement CSV and tag its rows with the district id taken from the filename
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
product_info.shape
product_info.columns
product_info.isnull().sum()
product_info.duplicated().any()
product_info = product_info.rename(columns={'Provider/Company Name': 'provider'})
product_info = product_info.rename(columns={'Primary Essential Function': 'essential function'})
plt.figure(figsize=(16, 10))
sns.countplot(y='provider', data=product_info, order=product_info['provider'].value_counts().index[:5], palette='flare')
plt.title('Top 5 e-learning product providers', size=15)
plt.show() | code |
72107191/cell_46 | [
"image_output_1.png"
] | import glob
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
product_info.shape
product_info.columns
product_info.isnull().sum()
product_info.duplicated().any()
product_info = product_info.rename(columns={'Provider/Company Name': 'provider'})
product_info = product_info.rename(columns={'Primary Essential Function': 'essential function'})
plt.figure(figsize=(8, 6))
sns.countplot(x='Sector(s)', order=product_info['Sector(s)'].value_counts().index[:3], data=product_info, color='darkblue')
plt.title('Top 3 sectors dominating digital learning') | code |
72107191/cell_24 | [
"text_plain_output_1.png"
] | import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
districts_info.head()
districts_info.tail() | code |
72107191/cell_22 | [
"text_plain_output_1.png"
] | import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
product_info.shape
product_info.columns
product_info.isnull().sum()
product_info.duplicated().any() | code |
72107191/cell_37 | [
"text_plain_output_1.png"
] | import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
engagement_data.shape
engagement_data.columns
engagement_data.isnull().sum()
engagement_data.info() | code |
72107191/cell_36 | [
"text_plain_output_1.png"
] | import glob
import pandas as pd
path = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
files = glob.glob(path + '/*.csv')
all_files = []
for filename in files:
    df = pd.read_csv(filename, index_col=None, header=0)
    district_id = filename.split('/')[4].split('.')[0]
    df['district_id'] = district_id
    all_files.append(df)
engagement_data = pd.concat(all_files)
engagement_data = engagement_data.reset_index(drop=True)
product_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_info = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
engagement_data.shape
engagement_data.columns
engagement_data.isnull().sum() | code |
129010375/cell_21 | [
"text_plain_output_1.png"
] | import os
image_path = '/content/dataset/semantic_drone_dataset/original_images'
mask_path = '/content/dataset/semantic_drone_dataset/label_images_semantic'
length = len(os.listdir(image_path))
train_dataset_len = int(length * 0.7)
val_dataset_len = length - train_dataset_len
train_dataset = DroneDataset(image_path, mask_path, train_dataset_len)
val_dataset = DroneDataset(image_path, mask_path, val_dataset_len, is_val=True)
train_dataset[5][0].shape | code |
129010375/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
labels = pd.read_csv('/content/class_dict_seg.csv')
classes = labels.name.values.tolist()
print(classes) | code |
129010375/cell_4 | [
"image_output_1.png"
] | !pip install kaggle | code |
129010375/cell_33 | [
"text_plain_output_1.png"
] | from torch.nn import CrossEntropyLoss
from torch.utils.data import random_split, DataLoader, Dataset
from torchvision.io import read_image, ImageReadMode
from torchvision.models.segmentation.deeplabv3 import DeepLabHead
from torchvision.models.segmentation.fcn import FCNHead
from tqdm import tqdm
import os
import torch
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
image_path = '/content/dataset/semantic_drone_dataset/original_images'
mask_path = '/content/dataset/semantic_drone_dataset/label_images_semantic'
length = len(os.listdir(image_path))
class DroneDataset(Dataset):

    def __init__(self, imgs_dir, masks_dir, count, is_val=False):
        self.imgs_dir = imgs_dir
        self.masks_dir = masks_dir
        imgs_paths = os.listdir(self.imgs_dir)
        imgs_paths.sort()
        mask_paths = os.listdir(self.masks_dir)
        mask_paths.sort()
        self.is_val = is_val
        # the training split takes the first `count` files, the validation split the last `count`
        if not is_val:
            self.imgs_paths = imgs_paths[:count]
            self.mask_paths = mask_paths[:count]
        else:
            self.imgs_paths = imgs_paths[-count:]
            self.mask_paths = mask_paths[-count:]

    def __len__(self):
        return len(self.imgs_paths)

    def __getitem__(self, idx):
        img = read_image(os.path.join(self.imgs_dir, self.imgs_paths[idx]), ImageReadMode.RGB)
        mask = read_image(os.path.join(self.masks_dir, self.mask_paths[idx]), ImageReadMode.GRAY)
        return (img, mask)
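# the pretrained weights' transforms() preset documents the expected preprocessing (resize + ImageNet mean/std), mirrored in img_transform below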
torchvision.models.segmentation.DeepLabV3_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1.transforms()
def img_transform(img, mask, is_val=False, size=520):
    img = img.to(device)
    mask = mask.to(device)
    img = img.float() / 255.0
    # augmentation (RandomAutocontrast) is only applied to the training split
    if not is_val:
        trans_img = torch.nn.Sequential(transforms.Resize([size, size]), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), transforms.RandomAutocontrast(p=0.2))
    else:
        trans_img = torch.nn.Sequential(transforms.Resize([size, size]), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
    trans_mask = torch.nn.Sequential(transforms.Resize([size, size]))
    trans_img.requires_grad_(False)
    trans_mask.requires_grad_(False)
    trans_img = trans_img.to(device)
    trans_mask = trans_mask.to(device)
    img = trans_img(img)
    mask = trans_mask(mask)
    return (img, mask.squeeze(1).long())
train_dataset_len = int(length * 0.7)
val_dataset_len = length - train_dataset_len
train_dataset = DroneDataset(image_path, mask_path, train_dataset_len)
val_dataset = DroneDataset(image_path, mask_path, val_dataset_len, is_val=True)
batch_size = 4
train_loader = DataLoader(train_dataset, batch_size, shuffle=True, num_workers=2)
val_loader = DataLoader(val_dataset, batch_size, shuffle=False, num_workers=2)
model = torchvision.models.segmentation.deeplabv3_resnet50(weights=torchvision.models.segmentation.DeepLabV3_ResNet50_Weights.DEFAULT, progress=True)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
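# replace both heads with fresh ones for the 23 drone-dataset classes; 2048 and 1024 are the channel widths of the ResNet-50 features feeding the main and auxiliary heads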
model.classifier = DeepLabHead(2048, 23)
model.aux_classifier = FCNHead(1024, 23)
model = model.to(device)
loss = CrossEntropyLoss().to(device)
learning_rate = 0.01
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
def pixel_accuracy(mask, output):
    output_softmax = F.softmax(output, dim=1)
    output_argmax = torch.argmax(output_softmax, dim=1)
    bool_tensor = torch.flatten(mask) == torch.flatten(output_argmax)
    return torch.sum(bool_tensor) / torch.numel(bool_tensor)
epoch_count = 30
train_losses = []
val_losses = []
train_accs = []
val_accs = []
es_steps = 3
count_steps = 0
train_len = len(train_loader)
val_len = len(val_loader)
print(train_len)
print(val_len)
best_score = 10000000000.0
for epoch in range(epoch_count):
    # early stopping: abort once validation loss has not improved for es_steps epochs
    if count_steps >= es_steps:
        print('Early stopping!')
        break
    train_loss_sum = 0
    train_pixel_acc = 0
    model.train()
    for img_batch, mask_batch in tqdm(train_loader):
        img_batch = img_batch.to(device, non_blocking=True)
        mask_batch = mask_batch.to(device, non_blocking=True)
        img_batch, mask_batch = img_transform(img_batch, mask_batch, is_val=False)
        optimizer.zero_grad()
        output_batch = model(img_batch)
        loss_value = loss(output_batch['out'], mask_batch)
        train_pixel_acc += pixel_accuracy(mask_batch, output_batch['out']).detach()
        train_loss_sum += loss_value.detach()
        loss_value.backward()
        optimizer.step()
        del output_batch
    train_loss = train_loss_sum / train_len
    train_acc = train_pixel_acc / train_len
    train_losses.append(train_loss)
    train_accs.append(train_acc)
    print(f'Epoch {epoch} / {epoch_count} | train loss = {train_loss} | train acc = {train_acc}')
    model.eval()
    val_loss_sum = 0
    val_pixel_acc = 0
    for img_batch, mask_batch in tqdm(val_loader):
        img_batch = img_batch.to(device, non_blocking=True)
        mask_batch = mask_batch.to(device, non_blocking=True)
        img_batch, mask_batch = img_transform(img_batch, mask_batch, is_val=True)
        output_batch = model(img_batch)
        loss_value = loss(output_batch['out'], mask_batch)
        val_loss_sum = val_loss_sum + loss_value.detach()
        val_pixel_acc = val_pixel_acc + pixel_accuracy(mask_batch, output_batch['out']).detach()
        del output_batch
    val_loss = val_loss_sum / val_len
    val_acc = val_pixel_acc / val_len
    val_losses.append(val_loss)
    val_accs.append(val_acc)
    print(f'Epoch {epoch} / {epoch_count} | val loss = {val_loss} | val acc = {val_acc}')
    # keep a checkpoint of the best model seen so far
    if val_loss < best_score:
        best_score = val_loss
        count_steps = 0
        torch.save(model, 'best_model.pt')
    else:
        count_steps += 1 | code |
129010375/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
labels = pd.read_csv('/content/class_dict_seg.csv')
labels.head() | code |
129010375/cell_18 | [
"text_plain_output_1.png"
] | import torchvision
torchvision.models.segmentation.DeepLabV3_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1.transforms() | code |
129010375/cell_22 | [
"text_plain_output_1.png"
] | import os
image_path = '/content/dataset/semantic_drone_dataset/original_images'
mask_path = '/content/dataset/semantic_drone_dataset/label_images_semantic'
length = len(os.listdir(image_path))
train_dataset_len = int(length * 0.7)
val_dataset_len = length - train_dataset_len
train_dataset = DroneDataset(image_path, mask_path, train_dataset_len)
val_dataset = DroneDataset(image_path, mask_path, val_dataset_len, is_val=True)
train_dataset[5][1].shape | code |
129010375/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
labels = pd.read_csv('/content/class_dict_seg.csv')
len(labels) | code |
32072152/cell_13 | [
"text_plain_output_1.png"
from scipy.stats import loguniform, uniform, randint
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import RandomizedSearchCV, KFold
import numpy as np
import pandas as pd
RANDOM_STATE = 1563
embeddings = np.load('/kaggle/input/biowordvec-precomputed-cord19/biowordvec.npy')
embeddings.shape
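# base GMM config; n_components and covariance_type are overridden by the random search below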
estimator = GaussianMixture(n_components=10, covariance_type='full', max_iter=100, n_init=1, init_params='kmeans', random_state=RANDOM_STATE)
N_ITER = 20
N_SPLITS = 4
param_distributions = {'n_components': randint(2, 256), 'covariance_type': ['diag', 'full', 'spherical']}
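# 4-fold shuffled CV; without an explicit scorer, RandomizedSearchCV uses GaussianMixture.score, the mean per-sample log-likelihood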
cv = KFold(n_splits=N_SPLITS, shuffle=True, random_state=RANDOM_STATE)
hp_search = RandomizedSearchCV(estimator=estimator, param_distributions=param_distributions, n_iter=N_ITER, n_jobs=N_SPLITS, cv=cv, verbose=1, random_state=RANDOM_STATE, return_train_score=True, refit=True)
hp_search.fit(embeddings)
best_model = hp_search.best_estimator_
hp_search.best_score_ | code |
32072152/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
len(df)
cluster_count = df['cluster'].value_counts().sort_values()
ax = cluster_count.plot(kind='bar', figsize=(15, 5))
ax.set_xticks([])
ax.set_xlabel('Cluster id')
ax.set_ylabel('Count')
ax.grid(True) | code |
32072152/cell_8 | [
"image_output_1.png"
] | import numpy as np
embeddings = np.load('/kaggle/input/biowordvec-precomputed-cord19/biowordvec.npy')
embeddings.shape | code |
32072152/cell_14 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from scipy.stats import loguniform, uniform, randint
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import RandomizedSearchCV, KFold
import numpy as np
import pandas as pd
import numpy as np
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import RandomizedSearchCV, KFold
from scipy.stats import loguniform, uniform, randint
RANDOM_STATE = 1563
embeddings = np.load('/kaggle/input/biowordvec-precomputed-cord19/biowordvec.npy')
embeddings.shape
estimator = GaussianMixture(n_components=10, covariance_type='full', max_iter=100, n_init=1, init_params='kmeans', random_state=RANDOM_STATE)
N_ITER = 20
N_SPLITS = 4
param_distributions = {'n_components': randint(2, 256), 'covariance_type': ['diag', 'full', 'spherical']}
cv = KFold(n_splits=N_SPLITS, shuffle=True, random_state=RANDOM_STATE)
hp_search = RandomizedSearchCV(estimator=estimator, param_distributions=param_distributions, n_iter=N_ITER, n_jobs=N_SPLITS, cv=cv, verbose=1, random_state=RANDOM_STATE, return_train_score=True, refit=True)
hp_search.fit(embeddings)
best_model = hp_search.best_estimator_
hp_search.best_score_
hp_search.best_params_ | code |
32072152/cell_12 | [
"text_plain_output_1.png"
] | from scipy.stats import loguniform, uniform, randint
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import RandomizedSearchCV, KFold
import numpy as np
import pandas as pd
RANDOM_STATE = 1563
embeddings = np.load('/kaggle/input/biowordvec-precomputed-cord19/biowordvec.npy')
embeddings.shape
estimator = GaussianMixture(n_components=10, covariance_type='full', max_iter=100, n_init=1, init_params='kmeans', random_state=RANDOM_STATE)
N_ITER = 20
N_SPLITS = 4
param_distributions = {'n_components': randint(2, 256), 'covariance_type': ['diag', 'full', 'spherical']}
cv = KFold(n_splits=N_SPLITS, shuffle=True, random_state=RANDOM_STATE)
hp_search = RandomizedSearchCV(estimator=estimator, param_distributions=param_distributions, n_iter=N_ITER, n_jobs=N_SPLITS, cv=cv, verbose=1, random_state=RANDOM_STATE, return_train_score=True, refit=True)
hp_search.fit(embeddings)
best_model = hp_search.best_estimator_ | code |
32072152/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/cleaning-cord-19-metadata/cord_metadata_cleaned.csv')
len(df) | code |
49118067/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
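# housing.csv is whitespace-delimited with no header row, so pass the standard Boston column names explicitly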
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
data_df.isnull().sum()
data_df.duplicated().any()
import matplotlib.pyplot as plt
import seaborn as sns
X = data_df.drop('MEDV', axis=1)
y = data_df['MEDV']
y.head() | code |
49118067/cell_25 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_
lr_all_coeffcients = pd.DataFrame([X_train.columns, lr_all.coef_]).T
lr_all_coeffcients = lr_all_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lr_all_coeffcients
lr_all.score(X_test, y_test)
from sklearn import metrics
from sklearn.linear_model import Ridge
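# compare an almost-unregularized ridge (alpha=0.01) with a strongly regularized one (alpha=100)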
rr1 = Ridge(alpha=0.01)
rr1.fit(X_train, y_train)
rr2 = Ridge(alpha=100)
rr2.fit(X_train, y_train)
print('Linear regression test score:', lr_all.score(X_test, y_test))
print('Ridge regression test score with low alpha(0.1):', rr1.score(X_test, y_test))
print('Ridge regression test score with high alpha(100):', rr2.score(X_test, y_test)) | code |
49118067/cell_4 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
print(data_df.shape) | code |
49118067/cell_34 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_
lr_all_coeffcients = pd.DataFrame([X_train.columns, lr_all.coef_]).T
lr_all_coeffcients = lr_all_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lr_all_coeffcients
from sklearn import metrics
from sklearn.linear_model import Ridge
ridge = Ridge(alpha=100)
ridge.fit(X_train, y_train)
y_pred2 = ridge.predict(X_test)
ridge.score(X_test, y_test)
from sklearn.linear_model import Lasso
lasso = Lasso(alpha=0.8)
lasso.fit(X_train, y_train)
y_pred3 = lasso.predict(X_test)
lasso.score(X_test, y_test)
lasso_coeffcients = pd.DataFrame([X_train.columns, lasso.coef_]).T
lasso_coeffcients = lasso_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lasso_coeffcients
from sklearn.model_selection import GridSearchCV
param_grid = {'alpha': np.arange(1, 10, 500)}  # note: the step (500) exceeds the range, so this grid contains only alpha=1
ridge = Ridge()
ridge_best_alpha = GridSearchCV(ridge, param_grid)
ridge_best_alpha.fit(X_train, y_train) | code |
49118067/cell_23 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_
lr_all_coeffcients = pd.DataFrame([X_train.columns, lr_all.coef_]).T
lr_all_coeffcients = lr_all_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lr_all_coeffcients
from sklearn import metrics
from sklearn.linear_model import Ridge
ridge = Ridge(alpha=100)
ridge.fit(X_train, y_train)
y_pred2 = ridge.predict(X_test)
ridge.score(X_test, y_test) | code |
49118067/cell_20 | [
"image_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LinearRegression
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_
lr_all_coeffcients = pd.DataFrame([X_train.columns, lr_all.coef_]).T
lr_all_coeffcients = lr_all_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lr_all_coeffcients
from sklearn import metrics
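# adjusted R^2 = 1 - (1 - R^2)(n - 1)/(n - p - 1): penalizes R^2 for the number of predictors p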
print('R^2:', metrics.r2_score(y_test, y_pred1))
print('Adjusted R^2:', 1 - (1 - metrics.r2_score(y_test, y_pred1)) * (len(y_test) - 1) / (len(y_test) - X_train.shape[1] - 1))
print('MAE:', metrics.mean_absolute_error(y_test, y_pred1))
print('MSE:', metrics.mean_squared_error(y_test, y_pred1))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred1))) | code |
49118067/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
data_df.isnull().sum() | code |
49118067/cell_29 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_
lr_all_coeffcients = pd.DataFrame([X_train.columns, lr_all.coef_]).T
lr_all_coeffcients = lr_all_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lr_all_coeffcients
from sklearn import metrics
from sklearn.linear_model import Lasso
lasso = Lasso(alpha=0.8)
lasso.fit(X_train, y_train)
y_pred3 = lasso.predict(X_test)
lasso.score(X_test, y_test) | code |
49118067/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
49118067/cell_7 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
data_df.isnull().sum()
data_df.duplicated().any() | code |
49118067/cell_18 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_
lr_all_coeffcients = pd.DataFrame([X_train.columns, lr_all.coef_]).T
lr_all_coeffcients = lr_all_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lr_all_coeffcients
lr_all.score(X_test, y_test) | code |
49118067/cell_32 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
data_df.isnull().sum()
data_df.duplicated().any()
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_
lr_all_coeffcients = pd.DataFrame([X_train.columns, lr_all.coef_]).T
lr_all_coeffcients = lr_all_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lr_all_coeffcients
lr_all.score(X_test, y_test)
from sklearn import metrics
from sklearn.linear_model import Ridge
rr1 = Ridge(alpha=0.01)
rr1.fit(X_train, y_train)
rr2 = Ridge(alpha=100)
rr2.fit(X_train, y_train)
import matplotlib.pyplot as plt
from sklearn.linear_model import Lasso
lasso = Lasso(alpha=0.8)
lasso.fit(X_train, y_train)
y_pred3 = lasso.predict(X_test)
lasso.score(X_test, y_test)
lasso_coeffcients = pd.DataFrame([X_train.columns, lasso.coef_]).T
lasso_coeffcients = lasso_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lasso_coeffcients
import matplotlib.pyplot as plt
plt.plot(names[0:13], lasso.coef_, alpha=0.4, linestyle='none', marker='o', markersize=7, color='green', label='Lasso Regression')
plt.plot(names[0:13], lr_all.coef_, alpha=0.4, linestyle='none', marker='d', markersize=7, color='blue', label='Linear Regression')
plt.xlabel('Coefficient Index', fontsize=16)
plt.ylabel('Coefficient Magnitude', fontsize=16)
plt.legend(fontsize=13, loc=4)
plt.show() | code |
49118067/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
data_df.isnull().sum()
data_df.duplicated().any()
data_df.hist(bins=12, figsize=(12, 10), grid=False) | code |
49118067/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_ | code |
49118067/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
data_df.head() | code |
49118067/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_
lr_all_coeffcients = pd.DataFrame([X_train.columns, lr_all.coef_]).T
lr_all_coeffcients = lr_all_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lr_all_coeffcients | code |
49118067/cell_35 | [
"image_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_
lr_all_coeffcients = pd.DataFrame([X_train.columns, lr_all.coef_]).T
lr_all_coeffcients = lr_all_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lr_all_coeffcients
from sklearn import metrics
from sklearn.linear_model import Ridge
ridge = Ridge(alpha=100)
ridge.fit(X_train, y_train)
y_pred2 = ridge.predict(X_test)
ridge.score(X_test, y_test)
from sklearn.linear_model import Lasso
lasso = Lasso(alpha=0.8)
lasso.fit(X_train, y_train)
y_pred3 = lasso.predict(X_test)
lasso.score(X_test, y_test)
lasso_coeffcients = pd.DataFrame([X_train.columns, lasso.coef_]).T
lasso_coeffcients = lasso_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lasso_coeffcients
from sklearn.model_selection import GridSearchCV
param_grid = {'alpha': np.arange(1, 10, 500)}  # note: the step (500) exceeds the range, so this grid contains only alpha=1
ridge = Ridge()
ridge_best_alpha = GridSearchCV(ridge, param_grid)
ridge_best_alpha.fit(X_train, y_train)
print('Best alpha for Ridge Regression:', ridge_best_alpha.best_params_)
print('Best score for Ridge Regression with best alpha:', ridge_best_alpha.best_score_) | code |
49118067/cell_31 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_
lr_all_coeffcients = pd.DataFrame([X_train.columns, lr_all.coef_]).T
lr_all_coeffcients = lr_all_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lr_all_coeffcients
from sklearn import metrics
from sklearn.linear_model import Lasso
lasso = Lasso(alpha=0.8)
lasso.fit(X_train, y_train)
y_pred3 = lasso.predict(X_test)
lasso.score(X_test, y_test)
lasso_coeffcients = pd.DataFrame([X_train.columns, lasso.coef_]).T
lasso_coeffcients = lasso_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lasso_coeffcients | code |
49118067/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
data_df.isnull().sum()
data_df.duplicated().any()
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(25, 12))
sns.heatmap(data_df.corr(), vmin=-1, vmax=1, center=0, cmap='coolwarm', annot=True)
plt.show() | code |
49118067/cell_27 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
data_df.isnull().sum()
data_df.duplicated().any()
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_
lr_all_coeffcients = pd.DataFrame([X_train.columns, lr_all.coef_]).T
lr_all_coeffcients = lr_all_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lr_all_coeffcients
lr_all.score(X_test, y_test)
from sklearn import metrics
from sklearn.linear_model import Ridge
rr1 = Ridge(alpha=0.01)
rr1.fit(X_train, y_train)
rr2 = Ridge(alpha=100)
rr2.fit(X_train, y_train)
import matplotlib.pyplot as plt
plt.plot(names[0:13], lr_all.coef_, alpha=0.4, linestyle='none', marker='o', markersize=7, color='green', label='Linear Regression')
plt.plot(names[0:13], rr1.coef_, alpha=0.4, linestyle='none', marker='*', markersize=7, color='red', label='Ridge;$\\alpha=0.01$')
plt.plot(names[0:13], rr2.coef_, alpha=0.4, linestyle='none', marker='d', markersize=7, color='blue', label='Ridge;$\\alpha=100$')
plt.xlabel('Coefficient Index', fontsize=16)
plt.ylabel('Coefficient Magnitude', fontsize=16)
plt.legend(fontsize=13, loc=4)
plt.show() | code |
49118067/cell_37 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
data_df.isnull().sum()
data_df.duplicated().any()
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_
lr_all_coeffcients = pd.DataFrame([X_train.columns, lr_all.coef_]).T
lr_all_coeffcients = lr_all_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lr_all_coeffcients
lr_all.score(X_test, y_test)
from sklearn import metrics
from sklearn.linear_model import Ridge
ridge = Ridge(alpha=100)
ridge.fit(X_train, y_train)
y_pred2 = ridge.predict(X_test)
ridge.score(X_test, y_test)
from sklearn.linear_model import Ridge
rr1 = Ridge(alpha=0.01)
rr1.fit(X_train, y_train)
rr2 = Ridge(alpha=100)
rr2.fit(X_train, y_train)
import matplotlib.pyplot as plt
from sklearn.linear_model import Lasso
lasso = Lasso(alpha=0.8)
lasso.fit(X_train, y_train)
y_pred3 = lasso.predict(X_test)
lasso.score(X_test, y_test)
lasso_coeffcients = pd.DataFrame([X_train.columns, lasso.coef_]).T
lasso_coeffcients = lasso_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lasso_coeffcients
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
param_grid = {'alpha': np.arange(1, 10, 500)}  # note: the step (500) exceeds the range, so this grid contains only alpha=1
ridge = Ridge()
ridge_best_alpha = GridSearchCV(ridge, param_grid)
ridge_best_alpha.fit(X_train, y_train)
from sklearn.model_selection import GridSearchCV
param_grid = {'alpha': np.arange(0, 0.1, 1)}  # note: the step (1) exceeds the range, so this grid contains only alpha=0
lasso = Lasso()
lasso_best_alpha = GridSearchCV(lasso, param_grid)
lasso_best_alpha.fit(X_train, y_train)
print('Best alpha for Lasso Regression:', lasso_best_alpha.best_params_)
print('Best score for Lasso Regression with best alpha:', lasso_best_alpha.best_score_) | code |
49118067/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
data_df.isnull().sum()
data_df.duplicated().any()
import matplotlib.pyplot as plt
import seaborn as sns
X = data_df.drop('MEDV', axis=1)
y = data_df['MEDV']
X.head() | code |
49118067/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
data_df.info() | code |
49118067/cell_36 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import Lasso
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
data_df = pd.read_csv('/kaggle/input/boston-house-prices/housing.csv', header=None, delim_whitespace=True, names=names)
data_df.isnull().sum()
data_df.duplicated().any()
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
lr_all = LinearRegression()
lr_all.fit(X_train, y_train)
y_pred1 = lr_all.predict(X_test)
lr_all.intercept_
lr_all_coeffcients = pd.DataFrame([X_train.columns, lr_all.coef_]).T
lr_all_coeffcients = lr_all_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lr_all_coeffcients
lr_all.score(X_test, y_test)
from sklearn import metrics
from sklearn.linear_model import Ridge
ridge = Ridge(alpha=100)
ridge.fit(X_train, y_train)
y_pred2 = ridge.predict(X_test)
ridge.score(X_test, y_test)
from sklearn.linear_model import Ridge
rr1 = Ridge(alpha=0.01)
rr1.fit(X_train, y_train)
rr2 = Ridge(alpha=100)
rr2.fit(X_train, y_train)
import matplotlib.pyplot as plt
from sklearn.linear_model import Lasso
lasso = Lasso(alpha=0.8)
lasso.fit(X_train, y_train)
y_pred3 = lasso.predict(X_test)
lasso.score(X_test, y_test)
lasso_coeffcients = pd.DataFrame([X_train.columns, lasso.coef_]).T
lasso_coeffcients = lasso_coeffcients.rename(columns={0: 'Attribute', 1: 'Coefficients'})
lasso_coeffcients
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
param_grid = {'alpha': np.arange(1, 10, 500)}  # note: the step (500) exceeds the range, so this grid contains only alpha=1
ridge = Ridge()
ridge_best_alpha = GridSearchCV(ridge, param_grid)
ridge_best_alpha.fit(X_train, y_train)
from sklearn.model_selection import GridSearchCV
param_grid = {'alpha': np.arange(0, 0.1, 1)}  # note: the step (1) exceeds the range, so this grid contains only alpha=0
lasso = Lasso()
lasso_best_alpha = GridSearchCV(lasso, param_grid)
lasso_best_alpha.fit(X_train, y_train) | code |
90148331/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
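# the MovieLens .dat files use '::' as a separator, which requires the python parser engine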
data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', engine='python', header=None, names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', engine='python', header=None, names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', engine='python', header=None, names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
sns.kdeplot(data=data_users['Age'], shade=True) | code |
90148331/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', engine='python', header=None, names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', engine='python', header=None, names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', engine='python', header=None, names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
data_ratings.sort_values(by='Rating', ascending=False).head(25)
reviews2 = data_ratings.sort_values(by='Rating', ascending=False).head(5)
data = [data_ratings['Timestamp'], data_users['Occupation']]
headers = ['Timestamp1', 'Occupation1']
df3 = pd.concat(data, axis=1, keys=headers)
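# note: concat(axis=1) pairs rows by index; the two frames differ in length, so Occupation1 is NaN past the shorter one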
df3.sort_values(by='Timestamp1')
sns.regplot(x=df3['Timestamp1'], y=df3['Occupation1']) | code |
90148331/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', engine='python', header=None, names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', engine='python', header=None, names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', engine='python', header=None, names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
data_movies.head() | code |
90148331/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', engine='python', header=None, names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', engine='python', header=None, names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', engine='python', header=None, names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
data_ratings.sort_values(by='Rating', ascending=False).head(25)
reviews2 = data_ratings.sort_values(by='Rating', ascending=False).head(5)
data = [data_ratings['Timestamp'], data_users['Occupation']]
headers = ['Timestamp1', 'Occupation1']
df3 = pd.concat(data, axis=1, keys=headers)
df3.sort_values(by='Timestamp1') | code |
90148331/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
90148331/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', engine='python', header=None, names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', engine='python', header=None, names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', engine='python', header=None, names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
data_ratings.head() | code |
90148331/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', engine='python', header=None, names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', engine='python', header=None, names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', engine='python', header=None, names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
data_ratings.sort_values(by='Rating', ascending=False).head(25)
reviews2 = data_ratings.sort_values(by='Rating', ascending=False).head(5)
data = [data_ratings['Timestamp'], data_users['Occupation']]
headers = ['Timestamp1', 'Occupation1']
df3 = pd.concat(data, axis=1, keys=headers)
df3.head() | code |
90148331/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', engine='python', header=None, names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', engine='python', header=None, names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', engine='python', header=None, names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
data_ratings.sort_values(by='Rating', ascending=False).head(25) | code |
90148331/cell_15 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', engine='python', header=None, names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', engine='python', header=None, names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', engine='python', header=None, names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
data_ratings.sort_values(by='Rating', ascending=False).head(25)
reviews2 = data_ratings.sort_values(by='Rating', ascending=False).head(5)
sns.barplot(x=reviews2.index, y=reviews2['Rating'])
plt.xlabel('Ratings Distribution') | code |
90148331/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', engine='python', header=None, names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', engine='python', header=None, names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', engine='python', header=None, names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
data_users.head() | code |
90148331/cell_22 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data_movies = pd.read_csv('../input/movielens/movies.dat', sep='::', engine='python', header=None, names=['MovieId', 'Title', 'Genres'], encoding='latin 1')
data_ratings = pd.read_csv('../input/movielens/ratings.dat', sep='::', engine='python', header=None, names=['UserId', 'MovieId', 'Rating', 'Timestamp'], encoding='latin 1')
data_users = pd.read_csv('../input/movielens/users.dat', sep='::', engine='python', header=None, names=['UserId', 'Gender', 'Age', 'Occupation', 'Zip-code'], encoding='latin 1')
data_ratings.sort_values(by='Rating', ascending=False).head(25)
reviews2 = data_ratings.sort_values(by='Rating', ascending=False).head(5)
data = [data_ratings['Timestamp'], data_users['Occupation']]
headers = ['Timestamp1', 'Occupation1']
df3 = pd.concat(data, axis=1, keys=headers)
df3.sort_values(by='Timestamp1')
data = [data_users['Age'], data_movies['Genres']]
headers = ['Age1', 'Genres1']
df4 = pd.concat(data, axis=1, keys=headers)
sns.regplot(x=df4['Genres1'], y=df4['Age1']) | code |
88098284/cell_42 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
df.isnull().sum()
df_coded = df.copy()
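# ordinal-encode AnnualIncomeClass; the Yes/No columns below are one-hot encoded with drop_first to avoid redundant indicators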
df_coded = df_coded.replace({'AnnualIncomeClass': {'Low Income': 0, 'Middle Income': 1, 'High Income': 2}})
dummies = ['BookedHotelOrNot', 'AccountSyncedToSocialMedia', 'FrequentFlyer']
df_coded = pd.get_dummies(df_coded, columns=dummies, drop_first=True)
df_coded.rename(columns={'BookedHotelOrNot_Yes': 'BookedHotel', 'AccountSyncedToSocialMedia_Yes': 'AccountSyncedToSocialMedia'}, inplace=True)
# Generic function to fit data and display results/predictions
def fit_evaluate(clf, X_train, X_test, y_train, y_test):
    # fit model to training data
    clf.fit(X_train, y_train)
    # make predictions for test data
    y_pred = clf.predict(X_test)
    # print evaluation
    print(classification_report(y_test, y_pred))
    print('\nConfusion Matrix: \n')
    s = sns.heatmap(confusion_matrix(y_test, y_pred), annot=True, fmt='g', cmap='YlGnBu')
    s.set(xlabel='Predicted class', ylabel='True class')
modelRF = RandomForestClassifier()
print('* Random Forest Classifier * \n')
fit_evaluate(modelRF, X_train, X_test, y_train, y_test) | code |
88098284/cell_21 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
sns.countplot(data=df, x='BookedHotelOrNot', hue='Churn').set_title('Churn by Booked Hotel') | code |
88098284/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
sns.countplot(data=df, x='Age', hue='Churn').set_title('Churn by Age') | code |
88098284/cell_23 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
df.isnull().sum() | code |
88098284/cell_30 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
df.isnull().sum()
df_coded = df.copy()
df_coded = df_coded.replace({'AnnualIncomeClass': {'Low Income': 0, 'Middle Income': 1, 'High Income': 2}})
dummies = ['BookedHotelOrNot', 'AccountSyncedToSocialMedia', 'FrequentFlyer']
df_coded = pd.get_dummies(df_coded, columns=dummies, drop_first=True)
df_coded.rename(columns={'BookedHotelOrNot_Yes': 'BookedHotel', 'AccountSyncedToSocialMedia_Yes': 'AccountSyncedToSocialMedia'}, inplace=True)
df_coded.head(6) | code |
88098284/cell_44 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from imblearn.ensemble import BalancedRandomForestClassifier, BalancedBaggingClassifier
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
df.isnull().sum()
df_coded = df.copy()
df_coded = df_coded.replace({'AnnualIncomeClass': {'Low Income': 0, 'Middle Income': 1, 'High Income': 2}})
dummies = ['BookedHotelOrNot', 'AccountSyncedToSocialMedia', 'FrequentFlyer']
df_coded = pd.get_dummies(df_coded, columns=dummies, drop_first=True)
df_coded.rename(columns={'BookedHotelOrNot_Yes': 'BookedHotel', 'AccountSyncedToSocialMedia_Yes': 'AccountSyncedToSocialMedia'}, inplace=True)
# Generic function to fit data and display results/predictions
def fit_evaluate(clf, X_train, X_test, y_train, y_test):
# fit model to training data
clf.fit(X_train, y_train)
# make predictions for test data
y_pred = clf.predict(X_test)
# print evaluation
print(classification_report(y_test, y_pred))
print('\nConfusion Matrix: \n')
s = sns.heatmap(confusion_matrix(y_test, y_pred), annot=True, fmt='g', cmap='YlGnBu');
s.set(xlabel='Predicted class', ylabel='True class')
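# NOTE (reconstruction): the train/test split lives in an earlier notebook cell that
# is not captured in this row; a minimal sketch, assuming an 80/20 stratified split
# on the encoded frame, would be:
from sklearn.model_selection import train_test_split
X = df_coded.drop(columns='Churn')
y = df_coded['Churn']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)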
modelRF_bal = BalancedRandomForestClassifier()
print('* Balanced Random Forest Classifier * \n')
fit_evaluate(modelRF_bal, X_train, X_test, y_train, y_test) | code |
88098284/cell_20 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
sns.countplot(data=df, x='AccountSyncedToSocialMedia', hue='Churn').set_title('Churn by Account Synced To Social Media') | code |
88098284/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.head() | code |
88098284/cell_40 | [
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
df.isnull().sum()
df_coded = df.copy()
df_coded = df_coded.replace({'AnnualIncomeClass': {'Low Income': 0, 'Middle Income': 1, 'High Income': 2}})
dummies = ['BookedHotelOrNot', 'AccountSyncedToSocialMedia', 'FrequentFlyer']
df_coded = pd.get_dummies(df_coded, columns=dummies, drop_first=True)
df_coded.rename(columns={'BookedHotelOrNot_Yes': 'BookedHotel', 'AccountSyncedToSocialMedia_Yes': 'AccountSyncedToSocialMedia'}, inplace=True)
# Generic function to fit data and display results/predictions
def fit_evaluate(clf, X_train, X_test, y_train, y_test):
# fit model to training data
clf.fit(X_train, y_train)
# make predictions for test data
y_pred = clf.predict(X_test)
# print evaluation
print(classification_report(y_test, y_pred))
print('\nConfusion Matrix: \n')
s = sns.heatmap(confusion_matrix(y_test, y_pred), annot=True, fmt='g', cmap='YlGnBu');
s.set(xlabel='Predicted class', ylabel='True class')
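# NOTE (reconstruction): the train/test split lives in an earlier notebook cell that
# is not captured in this row; a minimal sketch, assuming an 80/20 stratified split
# on the encoded frame, would be:
from sklearn.model_selection import train_test_split
X = df_coded.drop(columns='Churn')
y = df_coded['Churn']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)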
modelLR = LogisticRegression()
fit_evaluate(modelLR, X_train, X_test, y_train, y_test)
modelLR = LogisticRegression(class_weight='balanced')
print('* Logistic regression * \n')
fit_evaluate(modelLR, X_train, X_test, y_train, y_test) | code |
88098284/cell_39 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
df.isnull().sum()
df_coded = df.copy()
df_coded = df_coded.replace({'AnnualIncomeClass': {'Low Income': 0, 'Middle Income': 1, 'High Income': 2}})
dummies = ['BookedHotelOrNot', 'AccountSyncedToSocialMedia', 'FrequentFlyer']
df_coded = pd.get_dummies(df_coded, columns=dummies, drop_first=True)
df_coded.rename(columns={'BookedHotelOrNot_Yes': 'BookedHotel', 'AccountSyncedToSocialMedia_Yes': 'AccountSyncedToSocialMedia'}, inplace=True)
# Generic function to fit data and display results/predictions
def fit_evaluate(clf, X_train, X_test, y_train, y_test):
# fit model to training data
clf.fit(X_train, y_train)
# make predictions for test data
y_pred = clf.predict(X_test)
# print evaluation
print(classification_report(y_test, y_pred))
print('\nConfusion Matrix: \n')
s = sns.heatmap(confusion_matrix(y_test, y_pred), annot=True, fmt='g', cmap='YlGnBu');
s.set(xlabel='Predicted class', ylabel='True class')
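# NOTE (reconstruction): the train/test split comes from an earlier notebook cell not
# captured in this row; a minimal sketch, assuming an 80/20 stratified split on the
# encoded frame, would be:
from sklearn.model_selection import train_test_split
X = df_coded.drop(columns='Churn')
y = df_coded['Churn']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)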
modelLR = LogisticRegression()
print('* Logistic regression * \n')
fit_evaluate(modelLR, X_train, X_test, y_train, y_test) | code |
88098284/cell_48 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from imblearn.ensemble import BalancedRandomForestClassifier, BalancedBaggingClassifier
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
df.isnull().sum()
df_coded = df.copy()
df_coded = df_coded.replace({'AnnualIncomeClass': {'Low Income': 0, 'Middle Income': 1, 'High Income': 2}})
dummies = ['BookedHotelOrNot', 'AccountSyncedToSocialMedia', 'FrequentFlyer']
df_coded = pd.get_dummies(df_coded, columns=dummies, drop_first=True)
df_coded.rename(columns={'BookedHotelOrNot_Yes': 'BookedHotel', 'AccountSyncedToSocialMedia_Yes': 'AccountSyncedToSocialMedia'}, inplace=True)
# Generic function to fit data and display results/predictions
def fit_evaluate(clf, X_train, X_test, y_train, y_test):
# fit model to training data
clf.fit(X_train, y_train)
# make predictions for test data
y_pred = clf.predict(X_test)
# print evaluation
print(classification_report(y_test, y_pred))
print('\nConfusion Matrix: \n')
s = sns.heatmap(confusion_matrix(y_test, y_pred), annot=True, fmt='g', cmap='YlGnBu');
s.set(xlabel='Predicted class', ylabel='True class')
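# NOTE (reconstruction): the train/test split comes from an earlier notebook cell not
# captured in this row; a minimal sketch, assuming an 80/20 stratified split on the
# encoded frame, would be:
from sklearn.model_selection import train_test_split
X = df_coded.drop(columns='Churn')
y = df_coded['Churn']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)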
modelBBC = BalancedBaggingClassifier()
print('* Balanced Bagging Classifier * \n')
fit_evaluate(modelBBC, X_train, X_test, y_train, y_test) | code |
88098284/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)') | code |
88098284/cell_50 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.metrics import confusion_matrix, classification_report
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
df.isnull().sum()
df_coded = df.copy()
df_coded = df_coded.replace({'AnnualIncomeClass': {'Low Income': 0, 'Middle Income': 1, 'High Income': 2}})
dummies = ['BookedHotelOrNot', 'AccountSyncedToSocialMedia', 'FrequentFlyer']
df_coded = pd.get_dummies(df_coded, columns=dummies, drop_first=True)
df_coded.rename(columns={'BookedHotelOrNot_Yes': 'BookedHotel', 'AccountSyncedToSocialMedia_Yes': 'AccountSyncedToSocialMedia'}, inplace=True)
# Generic function to fit data and display results/predictions
def fit_evaluate(clf, X_train, X_test, y_train, y_test):
# fit model to training data
clf.fit(X_train, y_train)
# make predictions for test data
y_pred = clf.predict(X_test)
# print evaluation
print(classification_report(y_test, y_pred))
print('\nConfusion Matrix: \n')
s = sns.heatmap(confusion_matrix(y_test, y_pred), annot=True, fmt='g', cmap='YlGnBu');
s.set(xlabel='Predicted class', ylabel='True class')
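# NOTE (reconstruction): the train/test split lives in an earlier notebook cell that
# is not captured in this row; a minimal sketch, assuming an 80/20 stratified split
# on the encoded frame, would be:
from sklearn.model_selection import train_test_split
X = df_coded.drop(columns='Churn')
y = df_coded['Churn']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)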
modelKNN = KNeighborsClassifier()
print('* K Nearest Neighbors Classifier * \n')
fit_evaluate(modelKNN, X_train, X_test, y_train, y_test) | code |
88098284/cell_18 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
sns.countplot(data=df, x='AnnualIncomeClass', order=['Low Income', 'Middle Income', 'High Income'], hue='Churn').set_title('Churn by Annual Income Class') | code |
88098284/cell_32 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
df.isnull().sum()
df_coded = df.copy()
df_coded = df_coded.replace({'AnnualIncomeClass': {'Low Income': 0, 'Middle Income': 1, 'High Income': 2}})
dummies = ['BookedHotelOrNot', 'AccountSyncedToSocialMedia', 'FrequentFlyer']
df_coded = pd.get_dummies(df_coded, columns=dummies, drop_first=True)
df_coded.rename(columns={'BookedHotelOrNot_Yes': 'BookedHotel', 'AccountSyncedToSocialMedia_Yes': 'AccountSyncedToSocialMedia'}, inplace=True)
sns.heatmap(np.round(df_coded.corr(method='spearman'), 2), annot=True, cmap='Blues') | code |
88098284/cell_51 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
df.isnull().sum()
df_coded = df.copy()
df_coded = df_coded.replace({'AnnualIncomeClass': {'Low Income': 0, 'Middle Income': 1, 'High Income': 2}})
dummies = ['BookedHotelOrNot', 'AccountSyncedToSocialMedia', 'FrequentFlyer']
df_coded = pd.get_dummies(df_coded, columns=dummies, drop_first=True)
df_coded.rename(columns={'BookedHotelOrNot_Yes': 'BookedHotel', 'AccountSyncedToSocialMedia_Yes': 'AccountSyncedToSocialMedia'}, inplace=True)
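# NOTE (reconstruction): the train/test split lives in an earlier notebook cell that
# is not captured in this row; a minimal sketch, assuming an 80/20 stratified split
# on the encoded frame, would be:
from sklearn.model_selection import train_test_split
X = df_coded.drop(columns='Churn')
y = df_coded['Churn']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)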
error_rate = []
for i in range(1, 25):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred_i = knn.predict(X_test)
error_rate.append(np.mean(pred_i != y_test))
plt.plot(range(1, 25), error_rate, color='b', linestyle='--', marker='o', markerfacecolor='r', markeredgecolor='r', markersize=8)
plt.xlabel('K')
plt.ylabel('Error Rate')
plt.title('Error Rate vs. K Value')
print('Minimum error:', np.round(min(error_rate), 3), 'at K =', error_rate.index(min(error_rate)) + 1, '\n') | code |
88098284/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.info() | code |
88098284/cell_15 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
sns.countplot(data=df, x='ServicesOpted', hue='Churn').set_title('Churn by Services Opted') | code |
88098284/cell_16 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
sns.countplot(data=df, x='FrequentFlyer', hue='Churn').set_title('Churn by Frequent Flyer Status') | code |
88098284/cell_46 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe()
ax = sns.countplot(data=df, x='Churn')
percentage = df['Churn'].value_counts(normalize=True).values * 100
lbls = [f'{p:.1f}%' for p in percentage]
ax.bar_label(container=ax.containers[0], labels=lbls)
plt.ylim(top=800)
plt.title('Churned (0=no, 1=yes)');
df.isnull().sum()
df_coded = df.copy()
df_coded = df_coded.replace({'AnnualIncomeClass': {'Low Income': 0, 'Middle Income': 1, 'High Income': 2}})
dummies = ['BookedHotelOrNot', 'AccountSyncedToSocialMedia', 'FrequentFlyer']
df_coded = pd.get_dummies(df_coded, columns=dummies, drop_first=True)
df_coded.rename(columns={'BookedHotelOrNot_Yes': 'BookedHotel', 'AccountSyncedToSocialMedia_Yes': 'AccountSyncedToSocialMedia'}, inplace=True)
# Generic function to fit data and display results/predictions
def fit_evaluate(clf, X_train, X_test, y_train, y_test):
# fit model to training data
clf.fit(X_train, y_train)
# make predictions for test data
y_pred = clf.predict(X_test)
# print evaluation
print(classification_report(y_test, y_pred))
print('\nConfusion Matrix: \n')
s = sns.heatmap(confusion_matrix(y_test, y_pred), annot=True, fmt='g', cmap='YlGnBu');
s.set(xlabel='Predicted class', ylabel='True class')
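# NOTE (reconstruction): the train/test split comes from an earlier notebook cell not
# captured in this row; a minimal sketch, assuming an 80/20 stratified split on the
# encoded frame, would be:
from sklearn.model_selection import train_test_split
X = df_coded.drop(columns='Churn')
y = df_coded['Churn']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)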
modelGB = GradientBoostingClassifier()
print('* Gradient Boosting Classifier * \n')
fit_evaluate(modelGB, X_train, X_test, y_train, y_test) | code |
88098284/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/tour-travels-customer-churn-prediction/Customertravel.csv')
df.rename(columns={'Target': 'Churn'}, inplace=True)
df.groupby('Churn').describe() | code |
33101127/cell_21 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
playstore = pd.read_csv('/kaggle/input/google-play-store-apps/googleplaystore.csv')
playstore.isnull().sum()
playstore.loc[playstore['Reviews'] == '3.0M', 'Reviews'] = 3000000
playstore.loc[playstore['Size'] == 'Varies with device', 'Size'] = '-1M'
playstore.loc[playstore['Size'] == '1,000+', 'Size'] = '-1M'
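# Normalize Size to KB: 'M' entries are scaled by 1000, 'k' entries keep their number,
# and the '-1M' sentinel above marks sizes that could not be parsed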
playstore.loc[:, 'Size'] = playstore.loc[:, 'Size'].apply(lambda x: float(x[:-1]) * 1000 if x[-1] == 'M' else float(x[:-1]))
def drawBarChart(df, col):
    # Assumed intent (per the name): draw a bar chart of row counts per value of `col`
    tmp = df.reset_index()
    tmp = tmp.groupby(col)['index'].count()
    tmp.plot(kind='bar', figsize=(15, 7))
playstore.groupby('Category')['App'].count().plot(kind = 'bar', figsize = (15, 7))
_ = plt.title('Number of apps per category')
sns.kdeplot(playstore.groupby('App').agg({'Rating':'mean'})['Rating'], shade = True)
_ = plt.xlim(0,5)
_ = plt.title('Rating distribution across all apps')
playstore.loc[playstore['Reviews'] == '3.0M', 'Reviews'] = 3000000
playstore['Reviews'] = playstore['Reviews'].astype('int64')
tmp = playstore.groupby('Category').agg({'Reviews' : 'sum'})
tmp.plot(kind = 'bar', figsize = (15,7))
_ = plt.title('Number of user reviews per category')
playstore.groupby('Content Rating')['App'].count().plot(figsize=(15, 7), kind='bar')
_ = plt.title('Number of apps per content rating') | code |
33101127/cell_23 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
playstore = pd.read_csv('/kaggle/input/google-play-store-apps/googleplaystore.csv')
playstore.isnull().sum()
playstore.loc[playstore['Reviews'] == '3.0M', 'Reviews'] = 3000000
playstore.loc[playstore['Size'] == 'Varies with device', 'Size'] = '-1M'
playstore.loc[playstore['Size'] == '1,000+', 'Size'] = '-1M'
playstore.loc[:, 'Size'] = playstore.loc[:, 'Size'].apply(lambda x: float(x[:-1]) * 1000 if x[-1] == 'M' else float(x[:-1]))
def drawBarChart(df, col):
    # Assumed intent (per the name): draw a bar chart of row counts per value of `col`
    tmp = df.reset_index()
    tmp = tmp.groupby(col)['index'].count()
    tmp.plot(kind='bar', figsize=(15, 7))
playstore.groupby('Category')['App'].count().plot(kind = 'bar', figsize = (15, 7))
_ = plt.title('Number of apps per category')
sns.kdeplot(playstore.groupby('App').agg({'Rating':'mean'})['Rating'], shade = True)
_ = plt.xlim(0,5)
_ = plt.title('Rating distribution across all apps')
playstore.loc[playstore['Reviews'] == '3.0M', 'Reviews'] = 3000000
playstore['Reviews'] = playstore['Reviews'].astype('int64')
tmp = playstore.groupby('Category').agg({'Reviews' : 'sum'})
tmp.plot(kind = 'bar', figsize = (15,7))
_ = plt.title('Number of user reviews per category')
playstore.groupby('Content Rating')['App'].count().plot(figsize = (15, 7), kind = 'bar')
_ = plt.title('Number of apps per content rating')
playstore.groupby(['Installs'])['App'].count().plot(kind='bar', figsize=(15, 7))
_ = plt.title('Number of apps per install bracket') | code |
33101127/cell_6 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
playstore = pd.read_csv('/kaggle/input/google-play-store-apps/googleplaystore.csv')
playstore.info() | code |
33101127/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
playstore = pd.read_csv('/kaggle/input/google-play-store-apps/googleplaystore.csv')
playstore.isnull().sum()
playstore.loc[playstore['Reviews'] == '3.0M', 'Reviews'] = 3000000
playstore.loc[playstore['Size'] == 'Varies with device', 'Size'] = '-1M'
playstore.loc[playstore['Size'] == '1,000+', 'Size'] = '-1M'
playstore.loc[:, 'Size'] = playstore.loc[:, 'Size'].apply(lambda x: float(x[:-1]) * 1000 if x[-1] == 'M' else float(x[:-1]))
def drawBarChart(df, col):
    # Assumed intent (per the name): draw a bar chart of row counts per value of `col`
    tmp = df.reset_index()
    tmp = tmp.groupby(col)['index'].count()
    tmp.plot(kind='bar', figsize=(15, 7))
playstore.groupby('Category')['App'].count().plot(kind = 'bar', figsize = (15, 7))
_ = plt.title('Number of apps per category')
sns.kdeplot(playstore.groupby('App').agg({'Rating':'mean'})['Rating'], shade = True)
_ = plt.xlim(0,5)
_ = plt.title('Rating distribution across all apps')
playstore.loc[playstore['Reviews'] == '3.0M', 'Reviews'] = 3000000
playstore['Reviews'] = playstore['Reviews'].astype('int64')
tmp = playstore.groupby('Category').agg({'Reviews': 'sum'})
tmp.plot(kind='bar', figsize=(15, 7))
_ = plt.title('Number of user reviews per category') | code |
33101127/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
33101127/cell_8 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
playstore = pd.read_csv('/kaggle/input/google-play-store-apps/googleplaystore.csv')
for col in playstore.columns:
print('{} has {} unique values'.format(col, len(playstore[col].unique()))) | code |
33101127/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
playstore = pd.read_csv('/kaggle/input/google-play-store-apps/googleplaystore.csv')
playstore.isnull().sum()
playstore.loc[playstore['Reviews'] == '3.0M', 'Reviews'] = 3000000
playstore.loc[playstore['Size'] == 'Varies with device', 'Size'] = '-1M'
playstore.loc[playstore['Size'] == '1,000+', 'Size'] = '-1M'
playstore.loc[:, 'Size'] = playstore.loc[:, 'Size'].apply(lambda x: float(x[:-1]) * 1000 if x[-1] == 'M' else float(x[:-1]))
def drawBarChart(df, col):
    # Assumed intent (per the name): draw a bar chart of row counts per value of `col`
    tmp = df.reset_index()
    tmp = tmp.groupby(col)['index'].count()
    tmp.plot(kind='bar', figsize=(15, 7))
playstore.groupby('Category')['App'].count().plot(kind = 'bar', figsize = (15, 7))
_ = plt.title('Number of apps per category')
sns.kdeplot(playstore.groupby('App').agg({'Rating': 'mean'})['Rating'], shade=True)
_ = plt.xlim(0, 5)
_ = plt.title('Rating distribution across all apps') | code |
33101127/cell_24 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
playstore = pd.read_csv('/kaggle/input/google-play-store-apps/googleplaystore.csv')
playstore.isnull().sum()
playstore.loc[playstore['Reviews'] == '3.0M', 'Reviews'] = 3000000
playstore.loc[playstore['Size'] == 'Varies with device', 'Size'] = '-1M'
playstore.loc[playstore['Size'] == '1,000+', 'Size'] = '-1M'
playstore.loc[:, 'Size'] = playstore.loc[:, 'Size'].apply(lambda x: float(x[:-1]) * 1000 if x[-1] == 'M' else float(x[:-1]))
def drawBarChart(df, col):
    # Assumed intent (per the name): draw a bar chart of row counts per value of `col`
    tmp = df.reset_index()
    tmp = tmp.groupby(col)['index'].count()
    tmp.plot(kind='bar', figsize=(15, 7))
playstore.groupby('Category')['App'].count().plot(kind = 'bar', figsize = (15, 7))
_ = plt.title('Number of apps per category')
sns.kdeplot(playstore.groupby('App').agg({'Rating':'mean'})['Rating'], shade = True)
_ = plt.xlim(0,5)
_ = plt.title('Rating distribution across all apps')
playstore.loc[playstore['Reviews'] == '3.0M', 'Reviews'] = 3000000
playstore['Reviews'] = playstore['Reviews'].astype('int64')
tmp = playstore.groupby('Category').agg({'Reviews' : 'sum'})
tmp.plot(kind = 'bar', figsize = (15,7))
_ = plt.title('Number of user reviews per category')
playstore.groupby('Content Rating')['App'].count().plot(figsize = (15, 7), kind = 'bar')
_ = plt.title('Number of apps per content rating')
playstore.groupby(['Installs'])['App'].count().plot(kind = 'bar', figsize = (15,7))
_ = plt.title('Number of apps per install bracket')
playstore.loc[playstore.Type == 'Free', :].groupby(['Installs'])['App'].count().plot(kind='bar', figsize=(15, 7))
_ = plt.title('Free apps per install bracket') | code |
33101127/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
playstore = pd.read_csv('/kaggle/input/google-play-store-apps/googleplaystore.csv')
playstore.isnull().sum()
playstore.loc[playstore['Reviews'] == '3.0M', 'Reviews'] = 3000000
playstore.loc[playstore['Size'] == 'Varies with device', 'Size'] = '-1M'
playstore.loc[playstore['Size'] == '1,000+', 'Size'] = '-1M'
playstore.loc[:, 'Size'] = playstore.loc[:, 'Size'].apply(lambda x: float(x[:-1]) * 1000 if x[-1] == 'M' else float(x[:-1]))
def drawBarChart(df, col):
    # Assumed intent (per the name): draw a bar chart of row counts per value of `col`
    tmp = df.reset_index()
    tmp = tmp.groupby(col)['index'].count()
    tmp.plot(kind='bar', figsize=(15, 7))
playstore.groupby('Category')['App'].count().plot(kind='bar', figsize=(15, 7))
_ = plt.title('Number of apps per category') | code |
33101127/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
playstore = pd.read_csv('/kaggle/input/google-play-store-apps/googleplaystore.csv')
playstore.isnull().sum() | code |
33101127/cell_5 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
playstore = pd.read_csv('/kaggle/input/google-play-store-apps/googleplaystore.csv')
playstore.head() | code |
34139893/cell_13 | [
"text_plain_output_1.png"
] | from sklearn import model_selection
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, roc_curve
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder
from xgboost import XGBClassifier
import pandas as pd
df = pd.read_csv('/kaggle/input/titanic/train.csv')
submission_df = pd.read_csv('/kaggle/input/titanic/test.csv')
ids = pd.read_csv('/kaggle/input/titanic/test.csv')
df.drop('PassengerId', axis=1, inplace=True)
df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
df = df.drop(columns=['Ticket', 'Cabin'])
df['Title'] = df.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
df = df.drop(columns='Name')
submission_df.drop('PassengerId', axis=1, inplace=True)
submission_df['FamilySize'] = submission_df['SibSp'] + submission_df['Parch'] + 1
submission_df = submission_df.drop(columns=['Ticket', 'Cabin'])
submission_df['Title'] = submission_df.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
submission_df = submission_df.drop(columns='Name')
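# NOTE (reconstruction): the train/test split comes from an earlier notebook cell that
# is not captured in this row; a minimal sketch, assuming 'Survived' as the target on
# the engineered frame, would be:
from sklearn.model_selection import train_test_split
X = df.drop(columns='Survived')
y = df['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=21)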
num_features = list(X_test.select_dtypes(include=['int64', 'float64']).columns)
numeric_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('center', StandardScaler()), ('scale', MinMaxScaler())])
cat_features = list(X_test.select_dtypes(include=['object']).columns)
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='constant', fill_value='missing')), ('onehot', OneHotEncoder(handle_unknown='ignore'))])
features_preprocessor = ColumnTransformer(transformers=[('num', numeric_transformer, num_features), ('cat', categorical_transformer, cat_features)])
logreg_model = Pipeline(steps=[('features_preprocessor', features_preprocessor), ('logreg', LogisticRegression())])
xgb_model = Pipeline(steps=[('features_preprocessor', features_preprocessor), ('xgb', XGBClassifier())])
def run_exps(X_train: pd.DataFrame, y_train: pd.DataFrame, X_test: pd.DataFrame, y_test: pd.DataFrame) -> pd.DataFrame:
"""
Lightweight script to test many models and find winners
:param X_train: training split
:param y_train: training target vector
:param X_test: test split
:param y_test: test target vector
:return: DataFrame of cross-validation results
"""
dfs = []
models = [('Logistic Regression', logreg_model), ('XGBoost', xgb_model)]
results = []
names = []
scoring = ['accuracy', 'precision_weighted', 'recall_weighted', 'f1_weighted', 'roc_auc']
target_names = list(map(str, y_train.unique()))
for name, model in models:
kfold = model_selection.KFold(n_splits=5, shuffle=True, random_state=21)
cv_results = model_selection.cross_validate(model, X_train, y_train, cv=kfold, scoring=scoring)
clf = model.fit(X_train, y_train)
y_pred = clf.predict(X_test)
results.append(cv_results)
names.append(name)
this_df = pd.DataFrame(cv_results)
this_df['model'] = name
dfs.append(this_df)
final = pd.concat(dfs, ignore_index=True)
return final
def best_model_submission(X_train: pd.DataFrame, y_train: pd.DataFrame, X_submission: pd.DataFrame, best_model: list):
"""
Run fit function to the best model
:param X_train: training split
:param y_train: training target vector
:param X_submission: test split
:param best_model: list with name and classifier
:return: submission predictions
"""
for name, model in best_model:
clf = model.fit(X_train, y_train)
y_pred = clf.predict(X_submission)
return y_pred
model = [('Logistic Regression', logreg_model)]
y_pred = best_model_submission(pd.concat([X_train, X_test], ignore_index=True), pd.concat([y_train, y_test], ignore_index=True), submission_df, model)
results = pd.DataFrame()
results['PassengerId'] = ids.PassengerId
results['Survived'] = y_pred
results | code |
34139893/cell_9 | [
"text_html_output_1.png"
] | from sklearn import model_selection
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, roc_curve
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder
from xgboost import XGBClassifier
import pandas as pd
df = pd.read_csv('/kaggle/input/titanic/train.csv')
submission_df = pd.read_csv('/kaggle/input/titanic/test.csv')
ids = pd.read_csv('/kaggle/input/titanic/test.csv')
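# NOTE (reconstruction): the split cell is missing from this row and df is still the
# raw training frame; a minimal sketch, assuming 'Survived' as the target, would be:
from sklearn.model_selection import train_test_split
X = df.drop(columns='Survived')
y = df['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=21)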
num_features = list(X_test.select_dtypes(include=['int64', 'float64']).columns)
numeric_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('center', StandardScaler()), ('scale', MinMaxScaler())])
cat_features = list(X_test.select_dtypes(include=['object']).columns)
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='constant', fill_value='missing')), ('onehot', OneHotEncoder(handle_unknown='ignore'))])
features_preprocessor = ColumnTransformer(transformers=[('num', numeric_transformer, num_features), ('cat', categorical_transformer, cat_features)])
logreg_model = Pipeline(steps=[('features_preprocessor', features_preprocessor), ('logreg', LogisticRegression())])
xgb_model = Pipeline(steps=[('features_preprocessor', features_preprocessor), ('xgb', XGBClassifier())])
def run_exps(X_train: pd.DataFrame, y_train: pd.DataFrame, X_test: pd.DataFrame, y_test: pd.DataFrame) -> pd.DataFrame:
"""
Lightweight script to test many models and find winners
:param X_train: training split
:param y_train: training target vector
:param X_test: test split
:param y_test: test target vector
:return: DataFrame of cross-validation results
"""
dfs = []
models = [('Logistic Regression', logreg_model), ('XGBoost', xgb_model)]
results = []
names = []
scoring = ['accuracy', 'precision_weighted', 'recall_weighted', 'f1_weighted', 'roc_auc']
target_names = list(map(str, y_train.unique()))
for name, model in models:
kfold = model_selection.KFold(n_splits=5, shuffle=True, random_state=21)
cv_results = model_selection.cross_validate(model, X_train, y_train, cv=kfold, scoring=scoring)
clf = model.fit(X_train, y_train)
y_pred = clf.predict(X_test)
results.append(cv_results)
names.append(name)
this_df = pd.DataFrame(cv_results)
this_df['model'] = name
dfs.append(this_df)
final = pd.concat(dfs, ignore_index=True)
return final
models_results = run_exps(X_train, y_train, X_test, y_test) | code |
34139893/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/titanic/train.csv')
submission_df = pd.read_csv('/kaggle/input/titanic/test.csv')
ids = pd.read_csv('/kaggle/input/titanic/test.csv')
df.head() | code |
34139893/cell_2 | [
"text_html_output_1.png"
] | import os
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
129018802/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
dados = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
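# The stderr output attached to this cell suggests the drop below failed: exams.csv may
# not contain a 'Student ID' column. A defensive variant (hypothetical) would be:
# dados.drop(columns='Student ID', errors='ignore', inplace=True)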
dados.drop('Student ID', axis=1, inplace=True) | code |
129018802/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
dados = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
dados.head() | code |
106192280/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum() | code |
106192280/cell_25 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
df_cust_male = df_cust[df_cust['Genre'] == 'Male']
df_cust_female = df_cust[df_cust['Genre'] == 'Female']
def boxplot(frame,x,y,*args):
'''This function helps to plot the boxplot
frame : dataframe to be used
x : dataframe column for x axis
y : dataframe column for y axis
*args : to include more features like Title, palette, notch'''
plt.figure(figsize=(8,8))
bp=sns.boxplot(data=frame,x=x,y=y,palette=args[0],notch=args[1])
    # Index medians by tick label so each box gets its own median (value-sorting can mislabel)
    medians = frame.groupby(x)[y].median()
    vertical_offset = frame[y].median() * 0.01  # offset from median for display
    for xtick, lbl in zip(bp.get_xticks(), bp.get_xticklabels()):
        med = medians[lbl.get_text()]
        bp.text(xtick, med + vertical_offset, med,
                horizontalalignment='center', size='medium', color='blue', weight='semibold')
plt.title(args[2])
plt.grid()
plt.show()
boxplot(df_cust, 'Genre', 'Spending Score (1-100)', 'husl', False, 'Spending Score distribution of Male and Female') | code |
106192280/cell_34 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
df_cust_male = df_cust[df_cust['Genre'] == 'Male']
df_cust_female = df_cust[df_cust['Genre'] == 'Female']
def boxplot(frame,x,y,*args):
'''This function helps to plot the boxplot
frame : dataframe to be used
x : dataframe column for x axis
y : dataframe column for y axis
*args : to include more features like Title, palette, notch'''
plt.figure(figsize=(8,8))
bp=sns.boxplot(data=frame,x=x,y=y,palette=args[0],notch=args[1])
    # Index medians by tick label so each box gets its own median (value-sorting can mislabel)
    medians = frame.groupby(x)[y].median()
    vertical_offset = frame[y].median() * 0.01  # offset from median for display
    for xtick, lbl in zip(bp.get_xticks(), bp.get_xticklabels()):
        med = medians[lbl.get_text()]
        bp.text(xtick, med + vertical_offset, med,
                horizontalalignment='center', size='medium', color='blue', weight='semibold')
plt.title(args[2])
plt.grid()
plt.show()
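# Encode Genre as an integer via a small lookup table, then drop the original text column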
df_genre = pd.DataFrame({'Genre': ['Female', 'Male'], 'Genre_code': [0, 1]})
df_cust = df_cust.merge(df_genre, on='Genre')
df_cust.drop('Genre', axis=1, inplace=True)
df_cust.columns | code |
106192280/cell_23 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
df_cust_male = df_cust[df_cust['Genre'] == 'Male']
df_cust_female = df_cust[df_cust['Genre'] == 'Female']
def boxplot(frame,x,y,*args):
'''This function helps to plot the boxplot
frame : dataframe to be used
x : dataframe column for x axis
y : dataframe column for y axis
*args : to include more features like Title, palette, notch'''
plt.figure(figsize=(8,8))
bp=sns.boxplot(data=frame,x=x,y=y,palette=args[0],notch=args[1])
    # Index medians by tick label so each box gets its own median (value-sorting can mislabel)
    medians = frame.groupby(x)[y].median()
    vertical_offset = frame[y].median() * 0.01  # offset from median for display
    for xtick, lbl in zip(bp.get_xticks(), bp.get_xticklabels()):
        med = medians[lbl.get_text()]
        bp.text(xtick, med + vertical_offset, med,
                horizontalalignment='center', size='medium', color='blue', weight='semibold')
plt.title(args[2])
plt.grid()
plt.show()
boxplot(df_cust, 'Genre', 'Age', 'rainbow', True, 'Age distribution of Male and Female') | code |
106192280/cell_30 | [
"image_output_1.png"
] | import pandas as pd
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_genre = pd.DataFrame({'Genre': ['Female', 'Male'], 'Genre_code': [0, 1]})
df_genre.head() | code |
106192280/cell_20 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
df_cust_male = df_cust[df_cust['Genre'] == 'Male']
df_cust_female = df_cust[df_cust['Genre'] == 'Female']
plt.figure(figsize=(8, 8))
plt.scatter(df_cust_female['Age'], df_cust_female['Spending Score (1-100)'], c='blue', label='Female')
plt.scatter(df_cust_male['Age'], df_cust_male['Spending Score (1-100)'], c='orange', label='Male')
plt.legend(title='Gender')
plt.xlabel('Age')
plt.ylabel('Spending Score')
plt.title('Relationship of Age with Spending score')
plt.show() | code |
106192280/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.describe() | code |