path (stringlengths 13–17) | screenshot_names (sequencelengths 1–873) | code (stringlengths 0–40.4k) | cell_type (stringclasses 1: value) |
---|---|---|---|
128010675/cell_29 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from datasets import load_dataset
from sklearn.metrics import accuracy_score
from torch.utils.data import DataLoader
from transformers import TrainingArguments, Trainer
from transformers import ViTForImageClassification
from transformers import ViTImageProcessor
import numpy as np
import torch
from datasets import load_dataset
train_ds = load_dataset('miladfa7/5-Flower-Types-Classification-Dataset')
train_ds = train_ds['train'].train_test_split(test_size=0.15)
train_data = train_ds['train']
test_data = train_ds['test']
label = sorted(set(train_data['label']))  # sorted for a stable label order across runs
id2label = {i: lbl for i, lbl in enumerate(label)}
label2id = {lbl: i for i, lbl in id2label.items()}
from transformers import ViTImageProcessor
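# The image processor bundles the checkpoint's preprocessing config
# (normalization mean/std and expected input size), which the torchvision
# transforms below reuse so training inputs match ViT pretraining.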
processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224-in21k')
from torchvision.transforms import CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor
image_mean, image_std = (processor.image_mean, processor.image_std)
size = processor.size['height']
normalize = Normalize(mean=image_mean, std=image_std)
_train_transforms = Compose([Resize((size, size)), RandomHorizontalFlip(), ToTensor(), normalize])
_val_transforms = Compose([Resize((size, size)), ToTensor(), normalize])
def train_transforms(examples):
examples['pixel_values'] = [_train_transforms(image.convert('RGB')) for image in examples['image']]
return examples
def val_transforms(examples):
examples['pixel_values'] = [_val_transforms(image.convert('RGB')) for image in examples['image']]
return examples
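# set_transform applies the preprocessing lazily, per accessed batch, instead of
# materializing a transformed copy of the whole dataset in memory.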
train_data.set_transform(train_transforms)
test_data.set_transform(val_transforms)
from torch.utils.data import DataLoader
import torch
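# Stack the per-example image tensors into one batch and map string labels to ids.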
def collate_fn(examples):
pixel_values = torch.stack([example['pixel_values'] for example in examples])
labels = torch.tensor([label2id[example['label']] for example in examples])
return {'pixel_values': pixel_values, 'labels': labels}
train_dataloader = DataLoader(train_data, collate_fn=collate_fn, batch_size=4)
test_dataloader = DataLoader(test_data, collate_fn=collate_fn, batch_size=4)
from transformers import ViTForImageClassification
model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224-in21k', id2label=id2label, label2id=label2id)
from transformers import TrainingArguments, Trainer
metric_name = 'accuracy'
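# remove_unused_columns=False keeps the raw 'image' column alive; the Trainer
# would otherwise drop it, breaking the set_transform preprocessing at batch time.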
args = TrainingArguments('5-Flower-Types-Classification', save_strategy='epoch', evaluation_strategy='epoch', learning_rate=2e-05, per_device_train_batch_size=32, per_device_eval_batch_size=4, num_train_epochs=5, weight_decay=0.01, load_best_model_at_end=True, metric_for_best_model=metric_name, logging_dir='logs', remove_unused_columns=False)
from sklearn.metrics import accuracy_score
import numpy as np
def compute_metrics(eval_pred):
predictions, labels = eval_pred
predictions = np.argmax(predictions, axis=1)
return dict(accuracy=accuracy_score(labels, predictions))
import torch
trainer = Trainer(model, args, train_dataset=train_data, eval_dataset=test_data, data_collator=collate_fn, compute_metrics=compute_metrics, tokenizer=processor)
trainer.train()
outputs = trainer.predict(test_data)
print(outputs.metrics) | code |
128010675/cell_26 | [
"text_plain_output_1.png"
] | from datasets import load_dataset
from sklearn.metrics import accuracy_score
from torch.utils.data import DataLoader
from transformers import TrainingArguments, Trainer
from transformers import ViTForImageClassification
from transformers import ViTImageProcessor
import numpy as np
import torch
from datasets import load_dataset
train_ds = load_dataset('miladfa7/5-Flower-Types-Classification-Dataset')
train_ds = train_ds['train'].train_test_split(test_size=0.15)
train_data = train_ds['train']
test_data = train_ds['test']
label = sorted(set(train_data['label']))  # sorted for a stable label order across runs
id2label = {i: lbl for i, lbl in enumerate(label)}
label2id = {lbl: i for i, lbl in id2label.items()}
from transformers import ViTImageProcessor
processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224-in21k')
from torchvision.transforms import CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor
image_mean, image_std = (processor.image_mean, processor.image_std)
size = processor.size['height']
normalize = Normalize(mean=image_mean, std=image_std)
_train_transforms = Compose([Resize((size, size)), RandomHorizontalFlip(), ToTensor(), normalize])
_val_transforms = Compose([Resize((size, size)), ToTensor(), normalize])
def train_transforms(examples):
examples['pixel_values'] = [_train_transforms(image.convert('RGB')) for image in examples['image']]
return examples
def val_transforms(examples):
examples['pixel_values'] = [_val_transforms(image.convert('RGB')) for image in examples['image']]
return examples
train_data.set_transform(train_transforms)
test_data.set_transform(val_transforms)
from torch.utils.data import DataLoader
import torch
def collate_fn(examples):
pixel_values = torch.stack([example['pixel_values'] for example in examples])
labels = torch.tensor([label2id[example['label']] for example in examples])
return {'pixel_values': pixel_values, 'labels': labels}
train_dataloader = DataLoader(train_data, collate_fn=collate_fn, batch_size=4)
test_dataloader = DataLoader(test_data, collate_fn=collate_fn, batch_size=4)
from transformers import ViTForImageClassification
model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224-in21k', id2label=id2label, label2id=label2id)
from transformers import TrainingArguments, Trainer
metric_name = 'accuracy'
args = TrainingArguments('5-Flower-Types-Classification', save_strategy='epoch', evaluation_strategy='epoch', learning_rate=2e-05, per_device_train_batch_size=32, per_device_eval_batch_size=4, num_train_epochs=5, weight_decay=0.01, load_best_model_at_end=True, metric_for_best_model=metric_name, logging_dir='logs', remove_unused_columns=False)
from sklearn.metrics import accuracy_score
import numpy as np
def compute_metrics(eval_pred):
predictions, labels = eval_pred
predictions = np.argmax(predictions, axis=1)
return dict(accuracy=accuracy_score(labels, predictions))
import torch
trainer = Trainer(model, args, train_dataset=train_data, eval_dataset=test_data, data_collator=collate_fn, compute_metrics=compute_metrics, tokenizer=processor)
trainer.train() | code |
128010675/cell_2 | [
"text_plain_output_1.png"
] | !pip install -q transformers datasets | code |
128010675/cell_19 | [
"text_plain_output_1.png"
] | from datasets import load_dataset
from transformers import ViTForImageClassification
from datasets import load_dataset
train_ds = load_dataset('miladfa7/5-Flower-Types-Classification-Dataset')
train_ds = train_ds['train'].train_test_split(test_size=0.15)
train_data = train_ds['train']
test_data = train_ds['test']
label = sorted(set(train_data['label']))  # sorted for a stable label order across runs
id2label = {i: lbl for i, lbl in enumerate(label)}
label2id = {lbl: i for i, lbl in id2label.items()}
from transformers import ViTForImageClassification
model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224-in21k', id2label=id2label, label2id=label2id) | code |
128010675/cell_28 | [
"text_plain_output_1.png"
] | from datasets import load_dataset
from sklearn.metrics import accuracy_score
from torch.utils.data import DataLoader
from transformers import TrainingArguments, Trainer
from transformers import ViTForImageClassification
from transformers import ViTImageProcessor
import numpy as np
import torch
from datasets import load_dataset
train_ds = load_dataset('miladfa7/5-Flower-Types-Classification-Dataset')
train_ds = train_ds['train'].train_test_split(test_size=0.15)
train_data = train_ds['train']
test_data = train_ds['test']
label = sorted(set(train_data['label']))  # sorted for a stable label order across runs
id2label = {i: lbl for i, lbl in enumerate(label)}
label2id = {lbl: i for i, lbl in id2label.items()}
from transformers import ViTImageProcessor
processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224-in21k')
from torchvision.transforms import CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor
image_mean, image_std = (processor.image_mean, processor.image_std)
size = processor.size['height']
normalize = Normalize(mean=image_mean, std=image_std)
_train_transforms = Compose([Resize((size, size)), RandomHorizontalFlip(), ToTensor(), normalize])
_val_transforms = Compose([Resize((size, size)), ToTensor(), normalize])
def train_transforms(examples):
examples['pixel_values'] = [_train_transforms(image.convert('RGB')) for image in examples['image']]
return examples
def val_transforms(examples):
examples['pixel_values'] = [_val_transforms(image.convert('RGB')) for image in examples['image']]
return examples
train_data.set_transform(train_transforms)
test_data.set_transform(val_transforms)
from torch.utils.data import DataLoader
import torch
def collate_fn(examples):
pixel_values = torch.stack([example['pixel_values'] for example in examples])
labels = torch.tensor([label2id[example['label']] for example in examples])
return {'pixel_values': pixel_values, 'labels': labels}
train_dataloader = DataLoader(train_data, collate_fn=collate_fn, batch_size=4)
test_dataloader = DataLoader(test_data, collate_fn=collate_fn, batch_size=4)
from transformers import ViTForImageClassification
model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224-in21k', id2label=id2label, label2id=label2id)
from transformers import TrainingArguments, Trainer
metric_name = 'accuracy'
args = TrainingArguments('5-Flower-Types-Classification', save_strategy='epoch', evaluation_strategy='epoch', learning_rate=2e-05, per_device_train_batch_size=32, per_device_eval_batch_size=4, num_train_epochs=5, weight_decay=0.01, load_best_model_at_end=True, metric_for_best_model=metric_name, logging_dir='logs', remove_unused_columns=False)
from sklearn.metrics import accuracy_score
import numpy as np
def compute_metrics(eval_pred):
predictions, labels = eval_pred
predictions = np.argmax(predictions, axis=1)
return dict(accuracy=accuracy_score(labels, predictions))
import torch
trainer = Trainer(model, args, train_dataset=train_data, eval_dataset=test_data, data_collator=collate_fn, compute_metrics=compute_metrics, tokenizer=processor)
trainer.train()
outputs = trainer.predict(test_data) | code |
128010675/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from datasets import load_dataset
from datasets import load_dataset
train_ds = load_dataset('miladfa7/5-Flower-Types-Classification-Dataset')
train_ds = train_ds['train'].train_test_split(test_size=0.15)
train_data = train_ds['train']
test_data = train_ds['test']
label = sorted(set(train_data['label']))  # sorted for a stable label order across runs
id2label = {i: lbl for i, lbl in enumerate(label)}
label2id = {lbl: i for i, lbl in id2label.items()}
print(id2label, label2id) | code |
128010675/cell_16 | [
"image_output_1.png"
] | from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import ViTImageProcessor
import torch
from datasets import load_dataset
train_ds = load_dataset('miladfa7/5-Flower-Types-Classification-Dataset')
train_ds = train_ds['train'].train_test_split(test_size=0.15)
train_data = train_ds['train']
test_data = train_ds['test']
label = sorted(set(train_data['label']))  # sorted for a stable label order across runs
id2label = {i: lbl for i, lbl in enumerate(label)}
label2id = {lbl: i for i, lbl in id2label.items()}
from transformers import ViTImageProcessor
processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224-in21k')
from torchvision.transforms import CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor
image_mean, image_std = (processor.image_mean, processor.image_std)
size = processor.size['height']
normalize = Normalize(mean=image_mean, std=image_std)
_train_transforms = Compose([Resize((size, size)), RandomHorizontalFlip(), ToTensor(), normalize])
_val_transforms = Compose([Resize((size, size)), ToTensor(), normalize])
def train_transforms(examples):
examples['pixel_values'] = [_train_transforms(image.convert('RGB')) for image in examples['image']]
return examples
def val_transforms(examples):
examples['pixel_values'] = [_val_transforms(image.convert('RGB')) for image in examples['image']]
return examples
train_data.set_transform(train_transforms)
test_data.set_transform(val_transforms)
from torch.utils.data import DataLoader
import torch
def collate_fn(examples):
pixel_values = torch.stack([example['pixel_values'] for example in examples])
labels = torch.tensor([label2id[example['label']] for example in examples])
return {'pixel_values': pixel_values, 'labels': labels}
train_dataloader = DataLoader(train_data, collate_fn=collate_fn, batch_size=4)
test_dataloader = DataLoader(test_data, collate_fn=collate_fn, batch_size=4)
batch = next(iter(train_dataloader))
for k, v in batch.items():
if isinstance(v, torch.Tensor):
print(k, v.shape) | code |
128010675/cell_17 | [
"text_plain_output_1.png"
] | from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import ViTImageProcessor
import torch
from datasets import load_dataset
train_ds = load_dataset('miladfa7/5-Flower-Types-Classification-Dataset')
train_ds = train_ds['train'].train_test_split(test_size=0.15)
train_data = train_ds['train']
test_data = train_ds['test']
label = sorted(set(train_data['label']))  # sorted for a stable label order across runs
id2label = {i: lbl for i, lbl in enumerate(label)}
label2id = {lbl: i for i, lbl in id2label.items()}
from transformers import ViTImageProcessor
processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224-in21k')
from torchvision.transforms import CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor
image_mean, image_std = (processor.image_mean, processor.image_std)
size = processor.size['height']
normalize = Normalize(mean=image_mean, std=image_std)
_train_transforms = Compose([Resize((size, size)), RandomHorizontalFlip(), ToTensor(), normalize])
_val_transforms = Compose([Resize((size, size)), ToTensor(), normalize])
def train_transforms(examples):
examples['pixel_values'] = [_train_transforms(image.convert('RGB')) for image in examples['image']]
return examples
def val_transforms(examples):
examples['pixel_values'] = [_val_transforms(image.convert('RGB')) for image in examples['image']]
return examples
train_data.set_transform(train_transforms)
test_data.set_transform(val_transforms)
from torch.utils.data import DataLoader
import torch
def collate_fn(examples):
pixel_values = torch.stack([example['pixel_values'] for example in examples])
labels = torch.tensor([label2id[example['label']] for example in examples])
return {'pixel_values': pixel_values, 'labels': labels}
train_dataloader = DataLoader(train_data, collate_fn=collate_fn, batch_size=4)
test_dataloader = DataLoader(test_data, collate_fn=collate_fn, batch_size=4)
batch = next(iter(test_dataloader))
for k, v in batch.items():
if isinstance(v, torch.Tensor):
print(k, v.shape) | code |
128010675/cell_10 | [
"text_plain_output_1.png"
] | from transformers import ViTImageProcessor
from transformers import ViTImageProcessor
processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224-in21k') | code |
128010675/cell_12 | [
"text_plain_output_5.png",
"text_plain_output_4.png",
"text_plain_output_6.png",
"text_plain_output_3.png",
"text_plain_output_7.png",
"text_plain_output_8.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from transformers import ViTImageProcessor
from transformers import ViTImageProcessor
processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224-in21k')
from torchvision.transforms import CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor
image_mean, image_std = (processor.image_mean, processor.image_std)
size = processor.size['height']
print('Size: ', size)
normalize = Normalize(mean=image_mean, std=image_std)
_train_transforms = Compose([Resize((size, size)), RandomHorizontalFlip(), ToTensor(), normalize])
_val_transforms = Compose([Resize((size, size)), ToTensor(), normalize])
def train_transforms(examples):
examples['pixel_values'] = [_train_transforms(image.convert('RGB')) for image in examples['image']]
return examples
def val_transforms(examples):
examples['pixel_values'] = [_val_transforms(image.convert('RGB')) for image in examples['image']]
return examples | code |
328872/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import trueskill as ts
def cleanResults(numRaces, dfResults):
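    # Strip regatta penalty codes (DNF, RET, DNS, DNC, OCS, ...) and parentheses
    # so the remaining entries parse as plain numeric finishing positions.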
for raceCol in range(1, numRaces + 1):
        dfResults['R' + str(raceCol)] = dfResults['R' + str(raceCol)].str.replace('\\(|\\)|DNF-|RET-|SCP-|RDG-|RCT-|DNS-[0-9]*|DNC-[0-9]*|OCS-[0-9]*', '', regex=True)
dfResults['R' + str(raceCol)] = pd.to_numeric(dfResults['R' + str(raceCol)])
return dfResults
def doRating(numRaces, dfResults, dfRatings):
for raceCol in range(1, numRaces + 1):
competed = dfRatings['Name'].isin(dfResults['Name'][dfResults['R' + str(raceCol)].notnull()])
rating_group = list(zip(dfRatings['Rating'][competed].T.values.tolist()))
        new_ratings = ts.rate(rating_group, ranks=dfResults['R' + str(raceCol)][competed].T.values.tolist())
        # unpack each single-player rating group; .loc avoids chained-assignment warnings
        dfRatings.loc[competed, 'Rating'] = [r[0] for r in new_ratings]
return pd.DataFrame(dfRatings)
dfResults = pd.read_csv('../input/201608-SanFracisco-HydrofoilProTour.csv')
dfResults = cleanResults(16, dfResults)
dfRatings = pd.DataFrame()
dfRatings['Name'] = dfResults['Name']
dfRatings['Rating'] = pd.Series(np.repeat(ts.Rating(), len(dfRatings))).T.values.tolist()
dfRatings = doRating(16, dfResults, dfRatings)
dfRatings['mu'] = pd.Series(np.repeat(25.0, len(dfRatings)))
dfRatings['sigma'] = pd.Series(np.repeat(8.333, len(dfRatings)))
dfRatings['mu_minus_3sigma'] = pd.Series(np.repeat(0.0, len(dfRatings)))
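# mu - 3*sigma is TrueSkill's conservative skill estimate: the player's true
# skill exceeds it with roughly 99.7% confidence, making it a safe leaderboard score.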
for i in range(len(dfRatings)):
    dfRatings.loc[i, 'mu'] = float(dfRatings['Rating'][i].mu)
    dfRatings.loc[i, 'sigma'] = float(dfRatings['Rating'][i].sigma)
    dfRatings.loc[i, 'mu_minus_3sigma'] = dfRatings.loc[i, 'mu'] - 3 * dfRatings.loc[i, 'sigma'] | code |
328872/cell_7 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import trueskill as ts
def cleanResults(numRaces, dfResults):
for raceCol in range(1, numRaces + 1):
        dfResults['R' + str(raceCol)] = dfResults['R' + str(raceCol)].str.replace('\\(|\\)|DNF-|RET-|SCP-|RDG-|RCT-|DNS-[0-9]*|DNC-[0-9]*|OCS-[0-9]*', '', regex=True)
dfResults['R' + str(raceCol)] = pd.to_numeric(dfResults['R' + str(raceCol)])
return dfResults
def doRating(numRaces, dfResults, dfRatings):
for raceCol in range(1, numRaces + 1):
competed = dfRatings['Name'].isin(dfResults['Name'][dfResults['R' + str(raceCol)].notnull()])
rating_group = list(zip(dfRatings['Rating'][competed].T.values.tolist()))
        new_ratings = ts.rate(rating_group, ranks=dfResults['R' + str(raceCol)][competed].T.values.tolist())
        # unpack each single-player rating group; .loc avoids chained-assignment warnings
        dfRatings.loc[competed, 'Rating'] = [r[0] for r in new_ratings]
return pd.DataFrame(dfRatings)
dfResults = pd.read_csv('../input/201608-SanFracisco-HydrofoilProTour.csv')
dfResults = cleanResults(16, dfResults)
dfRatings = pd.DataFrame()
dfRatings['Name'] = dfResults['Name']
dfRatings['Rating'] = pd.Series(np.repeat(ts.Rating(), len(dfRatings))).T.values.tolist()
dfRatings = doRating(16, dfResults, dfRatings)
dfRatings['mu'] = pd.Series(np.repeat(25.0, len(dfRatings)))
dfRatings['sigma'] = pd.Series(np.repeat(8.333, len(dfRatings)))
dfRatings['mu_minus_3sigma'] = pd.Series(np.repeat(0.0, len(dfRatings)))
for i in range(len(dfRatings)):
    dfRatings.loc[i, 'mu'] = float(dfRatings['Rating'][i].mu)
    dfRatings.loc[i, 'sigma'] = float(dfRatings['Rating'][i].sigma)
    dfRatings.loc[i, 'mu_minus_3sigma'] = dfRatings.loc[i, 'mu'] - 3 * dfRatings.loc[i, 'sigma']
dfRatings.index = dfRatings['mu_minus_3sigma'].rank(ascending=False)
dfRatings.sort_values('mu_minus_3sigma', ascending=False) | code |
130011524/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
for image_batch, label_batch in dataset.take(1):
print(image_batch.shape)
print(label_batch.numpy()) | code |
130011524/cell_25 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, labels_batch in dataset.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_names[labels_batch[i]])
plt.axis("off")
train_size = 0.8
len(dataset) * train_size
train_ds = dataset.take(54)
len(train_ds)
test_ds = dataset.skip(54)
len(test_ds)
val_size = 0.1
len(dataset) * val_size
val_ds = test_ds.take(6)
len(val_ds)
test_ds = test_ds.skip(6)
len(test_ds)
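# Reusable version of the manual take/skip split above: 80/10/10 by default,
# with an optional shuffle before partitioning.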
def get_dataset_partitions_tf(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000):
assert train_split + test_split + val_split == 1
ds_size = len(ds)
if shuffle:
ds = ds.shuffle(shuffle_size, seed=12)
train_size = int(train_split * ds_size)
val_size = int(val_split * ds_size)
train_ds = ds.take(train_size)
val_ds = ds.skip(train_size).take(val_size)
test_ds = ds.skip(train_size).skip(val_size)
return (train_ds, val_ds, test_ds)
len(test_ds) | code |
130011524/cell_23 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, labels_batch in dataset.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_names[labels_batch[i]])
plt.axis("off")
train_size = 0.8
len(dataset) * train_size
train_ds = dataset.take(54)
len(train_ds)
test_ds = dataset.skip(54)
len(test_ds)
val_size = 0.1
len(dataset) * val_size
val_ds = test_ds.take(6)
len(val_ds)
test_ds = test_ds.skip(6)
len(test_ds)
def get_dataset_partitions_tf(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000):
assert train_split + test_split + val_split == 1
ds_size = len(ds)
if shuffle:
ds = ds.shuffle(shuffle_size, seed=12)
train_size = int(train_split * ds_size)
val_size = int(val_split * ds_size)
train_ds = ds.take(train_size)
val_ds = ds.skip(train_size).take(val_size)
test_ds = ds.skip(train_size).skip(val_size)
return (train_ds, val_ds, test_ds)
len(train_ds) | code |
130011524/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, labels_batch in dataset.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_names[labels_batch[i]])
plt.axis("off")
train_ds = dataset.take(54)
len(train_ds)
test_ds = dataset.skip(54)
len(test_ds)
val_ds = test_ds.take(6)
len(val_ds)
test_ds = test_ds.skip(6)
len(test_ds) | code |
130011524/cell_6 | [
"text_plain_output_1.png"
] | import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE) | code |
130011524/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, labels_batch in dataset.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype('uint8'))
plt.title(class_names[labels_batch[i]])
plt.axis('off') | code |
130011524/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, labels_batch in dataset.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_names[labels_batch[i]])
plt.axis("off")
train_ds = dataset.take(54)
len(train_ds)
test_ds = dataset.skip(54)
len(test_ds)
val_ds = test_ds.take(6)
len(val_ds) | code |
130011524/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
"\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n" | code |
130011524/cell_7 | [
"text_plain_output_1.png"
] | import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names | code |
130011524/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, labels_batch in dataset.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_names[labels_batch[i]])
plt.axis("off")
train_ds = dataset.take(54)
len(train_ds)
test_ds = dataset.skip(54)
len(test_ds)
val_size = 0.1
len(dataset) * val_size | code |
130011524/cell_32 | [
"text_plain_output_1.png"
] | from keras.applications.densenet import DenseNet121
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization, AveragePooling2D, GlobalAveragePooling2D, Input
from keras.models import Model, Sequential, load_model
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
SIZE = IMAGE_SIZE  # assumption: DenseNet input side length matches IMAGE_SIZE above
N_ch = CHANNELS    # assumption: channel count matches CHANNELS above

def build_densenet():
    densenet = DenseNet121(weights='imagenet', include_top=False)
    inputs = Input(shape=(SIZE, SIZE, N_ch))
    # 3-filter conv maps the input to the 3 channels the pretrained backbone expects
    x = Conv2D(3, (3, 3), padding='same')(inputs)
    x = densenet(x)
    x = GlobalAveragePooling2D()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    x = Dense(256, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    output = Dense(15, activation='softmax', name='root')(x)
    model = Model(inputs, output)
    optimizer = Adam(learning_rate=0.002, beta_1=0.9, beta_2=0.999, epsilon=0.1)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    model.summary()
    return model
model = build_densenet()
annealer = ReduceLROnPlateau(monitor='val_accuracy', factor=0.5, patience=5, verbose=1, min_lr=0.001)
checkpoint = ModelCheckpoint('model.h5', verbose=1, save_best_only=True)
datagen = ImageDataGenerator(rotation_range=360, width_shift_range=0.2, height_shift_range=0.2, zoom_range=0.2, horizontal_flip=True, vertical_flip=True)
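# Note: X_train, Y_train, X_val and Y_val are assumed to be prepared in earlier
# notebook cells; they are not defined in this cell.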
datagen.fit(X_train)
hist = model.fit(datagen.flow(X_train, Y_train, batch_size=BATCH_SIZE), steps_per_epoch=X_train.shape[0] // BATCH_SIZE, epochs=EPOCHS, verbose=2, callbacks=[annealer, checkpoint], validation_data=(X_val, Y_val)) | code |
130011524/cell_8 | [
"text_plain_output_1.png"
] | import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
len(dataset) | code |
130011524/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, labels_batch in dataset.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_names[labels_batch[i]])
plt.axis("off")
train_size = 0.8
len(dataset) * train_size | code |
130011524/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, labels_batch in dataset.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_names[labels_batch[i]])
plt.axis("off")
train_ds = dataset.take(54)
len(train_ds) | code |
130011524/cell_17 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, labels_batch in dataset.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_names[labels_batch[i]])
plt.axis("off")
train_ds = dataset.take(54)
len(train_ds)
test_ds = dataset.skip(54)
len(test_ds) | code |
130011524/cell_24 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, labels_batch in dataset.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_names[labels_batch[i]])
plt.axis("off")
train_size = 0.8
len(dataset) * train_size
train_ds = dataset.take(54)
len(train_ds)
test_ds = dataset.skip(54)
len(test_ds)
val_size = 0.1
len(dataset) * val_size
val_ds = test_ds.take(6)
len(val_ds)
test_ds = test_ds.skip(6)
len(test_ds)
def get_dataset_partitions_tf(ds, train_split=0.8, val_split=0.1, test_split=0.1, shuffle=True, shuffle_size=10000):
assert train_split + test_split + val_split == 1
ds_size = len(ds)
if shuffle:
ds = ds.shuffle(shuffle_size, seed=12)
train_size = int(train_split * ds_size)
val_size = int(val_split * ds_size)
train_ds = ds.take(train_size)
val_ds = ds.skip(train_size).take(val_size)
test_ds = ds.skip(train_size).skip(val_size)
return (train_ds, val_ds, test_ds)
len(val_ds) | code |
130011524/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
plt.figure(figsize=(10, 10))
for image_batch, labels_batch in dataset.take(1):
for i in range(12):
ax = plt.subplot(3, 4, i + 1)
plt.imshow(image_batch[i].numpy().astype("uint8"))
plt.title(class_names[labels_batch[i]])
plt.axis("off")
len(dataset) | code |
130011524/cell_10 | [
"text_plain_output_1.png"
] | import tensorflow as tf
BATCH_SIZE = 32
IMAGE_SIZE = 256
CHANNELS = 3
EPOCHS = 50
dataset = tf.keras.preprocessing.image_dataset_from_directory('/kaggle/input/potato-dataset/Potato', seed=123, shuffle=True, image_size=(IMAGE_SIZE, IMAGE_SIZE), batch_size=BATCH_SIZE)
class_names = dataset.class_names
class_names
for image_batch, label_batch in dataset.take(1):
print(image_batch[0].numpy()) | code |
16120680/cell_13 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
df = pd.read_csv('../input/train.csv')
df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
df.loc[df.Cabin.isnull(), 'CabinPrefix'] = None
df['CabinKnown'] = df.Cabin.isnull()  # note: True actually marks a missing cabin
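# TicketSplitLen counts whitespace-separated tokens in the raw ticket string,
# a rough indicator of whether the ticket carries a textual prefix.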
df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
df['Sex_Ind'] = -1
df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
df['Age'] = df.Age.fillna(0)
cols = ['Pclass','Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
q = df.groupby(col).Survived.sum()
t = df.groupby(col).Survived.count()
fig, ax = plt.subplots()
pos = [i for i,name in enumerate(q.index)]
vals = [name for i,name in enumerate(q.index)]
ax.barh(pos, t, color='r', label='died')
ax.barh(pos, q, label='survived')
ax.set_yticks(pos)
ax.set_yticklabels(vals)
ax.set_ylabel(col)
ax.legend()
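# Map cabin deck letters A-Z to integers 1-26; None (no cabin recorded) maps to -1.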
letter_map = {}
for i, letter in enumerate(list(string.ascii_lowercase.upper())):
letter_map[letter] = i + 1
letter_map[None] = -1
df['CabinPrefixInd'] = [letter_map[cabin_prefix] for cabin_prefix in df.CabinPrefix]
from sklearn.model_selection import train_test_split
X = np.array(df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'TicketSplitLen', 'Sex_Ind', 'CabinPrefixInd']])
y = np.array(df.Survived)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_train.shape
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
lr_model = LogisticRegression()
lr_model.fit(X_train, y_train)
predictions = lr_model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
precision = precision_score(y_test, predictions)
recall = recall_score(y_test, predictions)
f1 = f1_score(y_test, predictions)
parameters = lr_model.coef_
comparison = pd.DataFrame([['LR', accuracy, precision, recall, f1]], columns=['Model', 'Accuracy', 'Precision', 'Recall', 'F1'])
print(f'Accuracy with LR: {accuracy}')
print(f'Precision with LR: {precision}')
print(f'Recall with LR: {recall}')
print(f'F1 with LR: {f1}') | code |
16120680/cell_9 | [
"image_output_5.png",
"image_output_4.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
df = pd.read_csv('../input/train.csv')
df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
df.loc[df.Cabin.isnull(), 'CabinPrefix'] = None
df['CabinKnown'] = df.Cabin.isnull()  # note: True actually marks a missing cabin
df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
df['Sex_Ind'] = -1
df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
df['Age'] = df.Age.fillna(0)
cols = ['Pclass','Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
q = df.groupby(col).Survived.sum()
t = df.groupby(col).Survived.count()
fig, ax = plt.subplots()
pos = [i for i,name in enumerate(q.index)]
vals = [name for i,name in enumerate(q.index)]
ax.barh(pos, t, color='r', label='died')
ax.barh(pos, q, label='survived')
ax.set_yticks(pos)
ax.set_yticklabels(vals)
ax.set_ylabel(col)
ax.legend()
letter_map = {}
for i, letter in enumerate(list(string.ascii_lowercase.upper())):
letter_map[letter] = i + 1
letter_map[None] = -1
df['CabinPrefixInd'] = [letter_map[cabin_prefix] for cabin_prefix in df.CabinPrefix]
df.tail() | code |
16120680/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
df = pd.read_csv('../input/train.csv')
df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
df.loc[df.Cabin.isnull(), 'CabinPrefix'] = None
df['CabinKnown'] = df.Cabin.isnull()  # note: True actually marks a missing cabin
df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
df['Sex_Ind'] = -1
df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
df['Age'] = df.Age.fillna(0)
cols = ['Pclass','Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
q = df.groupby(col).Survived.sum()
t = df.groupby(col).Survived.count()
fig, ax = plt.subplots()
pos = [i for i,name in enumerate(q.index)]
vals = [name for i,name in enumerate(q.index)]
ax.barh(pos, t, color='r', label='died')
ax.barh(pos, q, label='survived')
ax.set_yticks(pos)
ax.set_yticklabels(vals)
ax.set_ylabel(col)
ax.legend()
letter_map = {}
for i, letter in enumerate(list(string.ascii_lowercase.upper())):
letter_map[letter] = i + 1
letter_map[None] = -1
df['CabinPrefixInd'] = [letter_map[cabin_prefix] for cabin_prefix in df.CabinPrefix]
from sklearn.model_selection import train_test_split
X = np.array(df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'TicketSplitLen', 'Sex_Ind', 'CabinPrefixInd']])
y = np.array(df.Survived)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_train.shape
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
lr_model = LogisticRegression()
lr_model.fit(X_train, y_train)
predictions = lr_model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
precision = precision_score(y_test, predictions)
recall = recall_score(y_test, predictions)
f1 = f1_score(y_test, predictions)
parameters = lr_model.coef_
comparison = pd.DataFrame([['LR', accuracy, precision, recall, f1]], columns=['Model', 'Accuracy', 'Precision', 'Recall', 'F1'])
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
nn_model = Sequential()
nn_model.add(Dense(16, activation='relu', input_shape=(8,)))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(32, activation='relu'))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(64, activation='relu'))
nn_model.add(Dropout(0.2, noise_shape=None, seed=None))
nn_model.add(Dense(1, activation='sigmoid'))
nn_model.summary()
nn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
results = nn_model.fit(X_train, y_train, epochs=200, batch_size=16, validation_data=(X_test, y_test))
predictions = nn_model.predict(X_test)
accuracy = accuracy_score(y_test, predictions.round())
precision = precision_score(y_test, predictions.round())
recall = recall_score(y_test, predictions.round())
f1 = f1_score(y_test, predictions.round())
comparison = pd.concat([comparison, pd.DataFrame([{'Model': 'NN', 'Accuracy': accuracy, 'Precision': precision, 'Recall': recall, 'F1': f1}])], ignore_index=True)
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
max_features = ['auto', 'sqrt']
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
bootstrap = [True, False]
random_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}
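# RandomizedSearchCV samples n_iter=100 combinations from this grid and scores
# each with 3-fold cross-validation instead of searching the grid exhaustively.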
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
rf_random = RandomizedSearchCV(estimator=rf_model, param_distributions=random_grid, n_iter=100, cv=3, verbose=2, random_state=42, n_jobs=-1)
rf_random.fit(X_train, y_train)
best_random = rf_random.best_estimator_
predictions = best_random.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
precision = precision_score(y_test, predictions)
recall = recall_score(y_test, predictions)
f1 = f1_score(y_test, predictions)
comparison = pd.concat([comparison, pd.DataFrame([{'Model': 'RF', 'Accuracy': accuracy, 'Precision': precision, 'Recall': recall, 'F1': f1}])], ignore_index=True)
print(f'Accuracy with RF: {accuracy}')
print(f'Precision with RF: {precision}')
print(f'Recall with RF: {recall}')
print(f'F1 with RF: {f1}') | code |
16120680/cell_2 | [
"text_plain_output_1.png"
] | import os
import string
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
print(os.listdir('../input')) | code |
16120680/cell_11 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
df = pd.read_csv('../input/train.csv')
df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
df.loc[df.Cabin.isnull(), 'CabinPrefix'] = None
df['CabinKnown'] = df.Cabin.isnull()  # note: True actually marks a missing cabin
df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
df['Sex_Ind'] = -1
df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
df['Age'] = df.Age.fillna(0)
cols = ['Pclass','Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
q = df.groupby(col).Survived.sum()
t = df.groupby(col).Survived.count()
fig, ax = plt.subplots()
pos = [i for i,name in enumerate(q.index)]
vals = [name for i,name in enumerate(q.index)]
ax.barh(pos, t, color='r', label='died')
ax.barh(pos, q, label='survived')
ax.set_yticks(pos)
ax.set_yticklabels(vals)
ax.set_ylabel(col)
ax.legend()
letter_map = {}
for i, letter in enumerate(list(string.ascii_lowercase.upper())):
letter_map[letter] = i + 1
letter_map[None] = -1
df['CabinPrefixInd'] = [letter_map[cabin_prefix] for cabin_prefix in df.CabinPrefix]
from sklearn.model_selection import train_test_split
X = np.array(df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'TicketSplitLen', 'Sex_Ind', 'CabinPrefixInd']])
y = np.array(df.Survived)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_train.shape | code |
16120680/cell_19 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
df = pd.read_csv('../input/train.csv')
df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
df.loc[df.Cabin.isnull(), 'CabinPrefix'] = None
df['CabinKnown'] = df.Cabin.isnull()  # note: True actually marks a missing cabin
df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
df['Sex_Ind'] = -1
df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
df['Age'] = df.Age.fillna(0)
cols = ['Pclass','Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
q = df.groupby(col).Survived.sum()
t = df.groupby(col).Survived.count()
fig, ax = plt.subplots()
pos = [i for i,name in enumerate(q.index)]
vals = [name for i,name in enumerate(q.index)]
ax.barh(pos, t, color='r', label='died')
ax.barh(pos, q, label='survived')
ax.set_yticks(pos)
ax.set_yticklabels(vals)
ax.set_ylabel(col)
ax.legend()
letter_map = {}
for i, letter in enumerate(list(string.ascii_lowercase.upper())):
letter_map[letter] = i + 1
letter_map[None] = -1
df['CabinPrefixInd'] = [letter_map[cabin_prefix] for cabin_prefix in df.CabinPrefix]
from sklearn.model_selection import train_test_split
X = np.array(df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'TicketSplitLen', 'Sex_Ind', 'CabinPrefixInd']])
y = np.array(df.Survived)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_train.shape
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
max_features = ['auto', 'sqrt']
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
bootstrap = [True, False]
random_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
rf_random = RandomizedSearchCV(estimator=rf_model, param_distributions=random_grid, n_iter=100, cv=3, verbose=2, random_state=42, n_jobs=-1)
rf_random.fit(X_train, y_train)
best_random = rf_random.best_estimator_ | code |
16120680/cell_7 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
df.loc[df.Cabin.isnull(), 'CabinPrefix'] = None
df['CabinKnown'] = df.Cabin.isnull()  # note: True actually marks a missing cabin
df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
df['Sex_Ind'] = -1
df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
df['Age'] = df.Age.fillna(0)
cols = ['Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
q = df.groupby(col).Survived.sum()
t = df.groupby(col).Survived.count()
fig, ax = plt.subplots()
pos = [i for i, name in enumerate(q.index)]
vals = [name for i, name in enumerate(q.index)]
ax.barh(pos, t, color='r', label='died')
ax.barh(pos, q, label='survived')
ax.set_yticks(pos)
ax.set_yticklabels(vals)
ax.set_ylabel(col)
ax.legend() | code |
16120680/cell_15 | [
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
nn_model = Sequential()
nn_model.add(Dense(16, activation='relu', input_shape=(8,)))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(32, activation='relu'))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(64, activation='relu'))
nn_model.add(Dropout(0.2, noise_shape=None, seed=None))
nn_model.add(Dense(1, activation='sigmoid'))
nn_model.summary()
nn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) | code |
16120680/cell_16 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
df = pd.read_csv('../input/train.csv')
df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
df.loc[df.Cabin.isnull(), 'CabinPrefix'] = None
df['CabinKnown'] = df.Cabin.isnull()  # note: True actually marks a missing cabin
df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
df['Sex_Ind'] = -1
df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
df['Age'] = df.Age.fillna(0)
cols = ['Pclass','Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
q = df.groupby(col).Survived.sum()
t = df.groupby(col).Survived.count()
fig, ax = plt.subplots()
pos = [i for i,name in enumerate(q.index)]
vals = [name for i,name in enumerate(q.index)]
ax.barh(pos, t, color='r', label='died')
ax.barh(pos, q, label='survived')
ax.set_yticks(pos)
ax.set_yticklabels(vals)
ax.set_ylabel(col)
ax.legend()
letter_map = {}
for i, letter in enumerate(list(string.ascii_lowercase.upper())):
letter_map[letter] = i + 1
letter_map[None] = -1
df['CabinPrefixInd'] = [letter_map[cabin_prefix] for cabin_prefix in df.CabinPrefix]
from sklearn.model_selection import train_test_split
X = np.array(df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'TicketSplitLen', 'Sex_Ind', 'CabinPrefixInd']])
y = np.array(df.Survived)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_train.shape
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
nn_model = Sequential()
nn_model.add(Dense(16, activation='relu', input_shape=(8,)))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(32, activation='relu'))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(64, activation='relu'))
nn_model.add(Dropout(0.2, noise_shape=None, seed=None))
nn_model.add(Dense(1, activation='sigmoid'))
nn_model.summary()
nn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
results = nn_model.fit(X_train, y_train, epochs=200, batch_size=16, validation_data=(X_test, y_test)) | code |
16120680/cell_3 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.head() | code |
16120680/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
df = pd.read_csv('../input/train.csv')
df['CabinPrefix'] = [str(cabinname)[0] for cabinname in df.Cabin]
df.loc[df.Cabin.isnull(), 'CabinPrefix'] = None
df['CabinKnown'] = df.Cabin.isnull()  # note: True actually marks a missing cabin
df['TicketSplitLen'] = [len(t.split()) for t in df.Ticket]
df['Sex_Ind'] = -1
df.loc[df.Sex == 'female', 'Sex_Ind'] = 1
df.loc[df.Sex == 'male', 'Sex_Ind'] = 2
df['Age'] = df.Age.fillna(0)
cols = ['Pclass','Sex', 'SibSp', 'Parch', 'Embarked', 'CabinPrefix', 'TicketSplitLen', 'CabinKnown']
for col in cols:
q = df.groupby(col).Survived.sum()
t = df.groupby(col).Survived.count()
fig, ax = plt.subplots()
pos = [i for i,name in enumerate(q.index)]
vals = [name for i,name in enumerate(q.index)]
ax.barh(pos, t, color='r', label='died')
ax.barh(pos, q, label='survived')
ax.set_yticks(pos)
ax.set_yticklabels(vals)
ax.set_ylabel(col)
ax.legend()
letter_map = {}
for i, letter in enumerate(list(string.ascii_lowercase.upper())):
letter_map[letter] = i + 1
letter_map[None] = -1
df['CabinPrefixInd'] = [letter_map[cabin_prefix] for cabin_prefix in df.CabinPrefix]
from sklearn.model_selection import train_test_split
X = np.array(df[['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'TicketSplitLen', 'Sex_Ind', 'CabinPrefixInd']])
y = np.array(df.Survived)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_train.shape
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
lr_model = LogisticRegression()
lr_model.fit(X_train, y_train)
predictions = lr_model.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
precision = precision_score(y_test, predictions)
recall = recall_score(y_test, predictions)
f1 = f1_score(y_test, predictions)
parameters = lr_model.coef_
comparison = pd.DataFrame([['LR', accuracy, precision, recall, f1]], columns=['Model', 'Accuracy', 'Precision', 'Recall', 'F1'])
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
nn_model = Sequential()
nn_model.add(Dense(16, activation='relu', input_shape=(8,)))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(32, activation='relu'))
nn_model.add(Dropout(0.3, noise_shape=None, seed=None))
nn_model.add(Dense(64, activation='relu'))
nn_model.add(Dropout(0.2, noise_shape=None, seed=None))
nn_model.add(Dense(1, activation='sigmoid'))
nn_model.summary()
nn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
results = nn_model.fit(X_train, y_train, epochs=200, batch_size=16, validation_data=(X_test, y_test))
predictions = nn_model.predict(X_test)
accuracy = accuracy_score(y_test, predictions.round())
precision = precision_score(y_test, predictions.round())
recall = recall_score(y_test, predictions.round())
f1 = f1_score(y_test, predictions.round())
comparison = pd.concat([comparison, pd.DataFrame([{'Model': 'NN', 'Accuracy': accuracy, 'Precision': precision, 'Recall': recall, 'F1': f1}])], ignore_index=True)
print(f'Accuracy with NN: {accuracy}')
print(f'Precision with NN: {precision}')
print(f'Recall with NN: {recall}')
print(f'F1 with NN: {f1}') | code |
50242100/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/titanic/train.csv')
df.shape
df.isna().sum()
df.describe() | code |
50242100/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50242100/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/titanic/train.csv')
df.shape | code |
50242100/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/titanic/train.csv')
df.shape
df.isna().sum() | code |
50242100/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/titanic/train.csv')
df.head() | code |
16120872/cell_21 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
import missingno
pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
df.isna().sum().sort_values(ascending=False)[:20]
for c in df.columns:
if df[c].dtypes == 'O':
df[c].fillna(value='none', inplace=True)
else:
df[c].fillna(value=0, inplace=True)
objects_list = ['MSSubClass']
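# Ordinal, Likert-style quality/condition columns (variable name keeps the author's "linkert" spelling)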
linkert_list = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'HeatingQC', 'KitchenQual', 'Functional', 'FireplaceQu', 'GarageQual', 'GarageCond', 'PavedDrive', 'Fence']
# For each categorical column, express the per-level SalePrice mean and std as a
# percentage of the overall mean/std (d and b are computed but never displayed here)
for c in df.columns:
    if df[c].dtypes == 'O':
        d = df.set_index(c)['SalePrice'].groupby(axis=0, level=0).mean()
        d = round(d / df['SalePrice'].mean() * 100, 1)
        d.name = 'Mean'
        b = df.set_index(c)['SalePrice'].groupby(axis=0, level=0).std()
        b = round(b / df['SalePrice'].std() * 100, 1)
        b.name = 'std'
a = []
for c in linkert_list:
a.append(df[c].unique().tolist())
label_values = pd.DataFrame(a).T
label_values | code |
16120872/cell_6 | [
"text_html_output_1.png"
] | import missingno
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
import missingno
pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
missingno.matrix(df, figsize=(30, 5)) | code |
16120872/cell_11 | [
"text_html_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
import missingno
pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
df.isna().sum().sort_values(ascending=False)[:20]
for c in df.columns:
if df[c].dtypes == 'O':
df[c].fillna(value='none', inplace=True)
else:
df[c].fillna(value=0, inplace=True)
df.head() | code |
16120872/cell_1 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
import matplotlib.pyplot as plt
import seaborn as sns
import missingno
pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all' | code |
16120872/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
import missingno
pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
df.isna().sum().sort_values(ascending=False)[:20] | code |
16120872/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
import missingno
pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
df.isna().sum().sort_values(ascending=False)[:20]
for c in df.columns:
if df[c].dtypes == 'O':
df[c].fillna(value='none', inplace=True)
else:
df[c].fillna(value=0, inplace=True)
df.iloc[:, -1].head() | code |
16120872/cell_3 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
import missingno
pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
df.head() | code |
16120872/cell_22 | [
"text_html_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
import missingno
pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
df.isna().sum().sort_values(ascending=False)[:20]
for c in df.columns:
if df[c].dtypes == 'O':
df[c].fillna(value='none', inplace=True)
else:
df[c].fillna(value=0, inplace=True)
objects_list = ['MSSubClass']
linkert_list = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'HeatingQC', 'KitchenQual', 'Functional', 'FireplaceQu', 'GarageQual', 'GarageCond', 'PavedDrive', 'Fence']
for c in df.columns:
if df[c].dtypes == 'O':
d = df.set_index(c)['SalePrice'].groupby(axis=0, level=0).mean()
d = round(d / df['SalePrice'].mean() * 100, 1)
d.name = 'Mean'
b = df.set_index(c)['SalePrice'].groupby(axis=0, level=0).std()
b = round(b / df['SalePrice'].std() * 100, 1)
b.name = 'std'
a = []
for c in linkert_list:
a.append(df[c].unique().tolist())
label_values = pd.DataFrame(a).T
label_values
label_loc = [4, 7, 8, 11, 12]
label_values[label_loc] | code |
16120872/cell_36 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
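# Note: x_train and y_train are assumed to be defined in an earlier cell of the
# source notebook; this snippet is not self-contained.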
clf = RandomForestClassifier(n_estimators=100)
clf.fit(x_train, y_train)
clf.score(x_train, y_train) | code |
89131213/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import ast
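# keep_default_na=False stops pandas from converting strings such as "NA" or "None" into NaN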
data = pd.read_csv('example_data/Belgium_labeled.csv', keep_default_na=False)[['text', 'label']] | code |
89127563/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
sample_submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
train.sample(20)
test.sample(20)
test.dtypes | code |
89127563/cell_30 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
sample_submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
train.sample(20)
test.sample(20)
train.dtypes
test.dtypes
missing_values = pd.concat([train.drop(columns=['Transported']).isnull().sum(), test.isnull().sum()], axis=1)
missing_values.columns = ['Number of missing value (train)', 'Number of missing value (test)']
missing_values['% of missing value (train)'] = 100 * missing_values['Number of missing value (train)'] / train.shape[0]
missing_values['% of missing value (test)'] = 100 * missing_values['Number of missing value (test)'] / test.shape[0]
missing_values
cardinality = pd.concat([train.drop(columns=['Transported']).nunique(), test.nunique()], axis=1)
cardinality.columns = ['Number of unique values (train)', 'Number of unique values (test)']
cardinality | code |
89127563/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
sample_submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
train.sample(20)
test.sample(20)
print(f'Data size:\n- Training data: Number of row = {train.shape[0]}, Number of columns = {train.shape[1]}\n- Test data : Number of row = {test.shape[0]}, Number of columns = {test.shape[1]}') | code |
89127563/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
sample_submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
train.sample(20)
test.sample(20)
train.dtypes
test.dtypes
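# Side-by-side missing-value counts and percentages for train (target column dropped) and test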
missing_values = pd.concat([train.drop(columns=['Transported']).isnull().sum(), test.isnull().sum()], axis=1)
missing_values.columns = ['Number of missing value (train)', 'Number of missing value (test)']
missing_values['% of missing value (train)'] = 100 * missing_values['Number of missing value (train)'] / train.shape[0]
missing_values['% of missing value (test)'] = 100 * missing_values['Number of missing value (test)'] / test.shape[0]
missing_values | code |
89127563/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
sample_submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
test.sample(20) | code |
89127563/cell_32 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
sample_submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
train.sample(20)
test.sample(20)
train.dtypes
test.dtypes
missing_values = pd.concat([train.drop(columns=['Transported']).isnull().sum(), test.isnull().sum()], axis=1)
missing_values.columns = ['Number of missing value (train)', 'Number of missing value (test)']
missing_values['% of missing value (train)'] = 100 * missing_values['Number of missing value (train)'] / train.shape[0]
missing_values['% of missing value (test)'] = 100 * missing_values['Number of missing value (test)'] / test.shape[0]
missing_values
cardinality = pd.concat([train.drop(columns=['Transported']).nunique(), test.nunique()], axis=1)
cardinality.columns = ['Number of unique values (train)', 'Number of unique values (test)']
cardinality
train['Cabin'] | code |
89127563/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
sample_submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
train.sample(20)
train['Transported'].value_counts(dropna=False).reset_index().rename(columns={'index': 'Transported', 'Transported': 'Number of rows'}) | code |
89127563/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
sample_submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
train.sample(20)
test.sample(20)
train.dtypes | code |
89127563/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
sample_submission = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
train.sample(20) | code |
89127563/cell_5 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73095137/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])  # DataFrame.append was removed in pandas 2.0; pd.concat is the drop-in equivalent
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
# Check the class imbalance of the target
fig = plt.figure(figsize=(8, 5))
df_all.satisfaction.value_counts(normalize=True).plot(kind='bar', color=['darkorange', 'steelblue'], alpha=0.9, rot=0)
plt.tight_layout()  # the original bare plt.tight_layout (no parentheses) was a no-op placed before the figure
plt.show()
df_all = pd.get_dummies(df_all, columns=['Gender', 'Customer_Type', 'Type_of_Travel', 'Class'])
df_all | code |
73095137/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
plt.tight_layout  # no-op as written: the missing () means tight_layout is never actually called
df_all.select_dtypes('object').head() | code |
73095137/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2) | code |
73095137/cell_25 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
# Check the class imbalance of the target
fig = plt.figure(figsize=(8, 5))
df_all.satisfaction.value_counts(normalize=True).plot(kind='bar', color=['darkorange', 'steelblue'], alpha=0.9, rot=0)
plt.tight_layout()
plt.show()
df_all = pd.get_dummies(df_all, columns=['Gender', 'Customer_Type', 'Type_of_Travel', 'Class'])
df_all
df_all.head().T
feats = [c for c in df_all.columns if c not in ['satisfaction']]
feats | code |
73095137/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.info() | code |
73095137/cell_23 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
# Check the class imbalance of the target
fig = plt.figure(figsize=(8, 5))
df_all.satisfaction.value_counts(normalize=True).plot(kind='bar', color=['darkorange', 'steelblue'], alpha=0.9, rot=0)
plt.tight_layout()
plt.show()
df_all = pd.get_dummies(df_all, columns=['Gender', 'Customer_Type', 'Type_of_Travel', 'Class'])
df_all
df_all.head().T
df_all.info() | code |
73095137/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8, 5))
df_all.satisfaction.value_counts(normalize=True).plot(kind='bar', color=['darkorange', 'steelblue'], alpha=0.9, rot=0)
plt.tight_layout()
plt.show()
73095137/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape) | code |
73095137/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 15))
sns.heatmap(df_all.corr(), annot=True, cmap='coolwarm')
plt.tight_layout() | code |
73095137/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
plt.tight_layout  # no-op as written: the missing () means tight_layout is never actually called
df_all['satisfaction'].value_counts() | code |
73095137/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73095137/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum() | code |
73095137/cell_18 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
plt.tight_layout  # no-op as written: the missing () means tight_layout is never actually called
df_all['Class'].value_counts() | code |
73095137/cell_28 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
# Check the class imbalance of the target
fig = plt.figure(figsize=(8, 5))
df_all.satisfaction.value_counts(normalize=True).plot(kind='bar', color=['darkorange', 'steelblue'], alpha=0.9, rot=0)
plt.tight_layout()
plt.show()
df_all = pd.get_dummies(df_all, columns=['Gender', 'Customer_Type', 'Type_of_Travel', 'Class'])
df_all
df_all.head().T
from sklearn.model_selection import train_test_split
train, test = train_test_split(df_all, test_size=0.2, random_state=42)
train, valid = train_test_split(train, test_size=0.2, random_state=42)
(train.shape, valid.shape, test.shape)
feats = [c for c in df_all.columns if c not in ['satisfaction']]
feats
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=200, random_state=42, n_jobs=-1)
rf.fit(train[feats], train['satisfaction'])
preds_val = rf.predict(valid[feats])
preds_val | code |
73095137/cell_15 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
plt.tight_layout  # no-op as written: the missing () means tight_layout is never actually called
df_all['Customer_Type'].value_counts() | code |
73095137/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
plt.tight_layout  # no-op as written: the missing () means tight_layout is never actually called
df_all['Gender'].value_counts() | code |
73095137/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape | code |
73095137/cell_17 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
plt.tight_layout  # no-op as written: the missing () means tight_layout is never actually called
df_all['Type_of_Travel'].value_counts() | code |
73095137/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
# Check the class imbalance of the target
fig = plt.figure(figsize=(8, 5))
df_all.satisfaction.value_counts(normalize=True).plot(kind='bar', color=['darkorange', 'steelblue'], alpha=0.9, rot=0)
plt.tight_layout()
plt.show()
df_all = pd.get_dummies(df_all, columns=['Gender', 'Customer_Type', 'Type_of_Travel', 'Class'])
df_all
df_all.head().T
from sklearn.model_selection import train_test_split
train, test = train_test_split(df_all, test_size=0.2, random_state=42)
train, valid = train_test_split(train, test_size=0.2, random_state=42)
(train.shape, valid.shape, test.shape) | code |
73095137/cell_22 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
# Check the class imbalance of the target
fig = plt.figure(figsize=(8, 5))
df_all.satisfaction.value_counts(normalize=True).plot(kind='bar', color=['darkorange', 'steelblue'], alpha=0.9, rot=0)
plt.tight_layout()
plt.show()
df_all = pd.get_dummies(df_all, columns=['Gender', 'Customer_Type', 'Type_of_Travel', 'Class'])
df_all
df_all.head().T | code |
73095137/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
df_all['Arrival_Delay_in_Minutes'] = df_all['Arrival_Delay_in_Minutes'].fillna(df_all['Arrival_Delay_in_Minutes'].median())
df_all | code |
73095137/cell_27 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T
df_all.columns = [c.replace(' ', '_') for c in df_all.columns]
df_all.isna().sum()
df_all = df_all.drop('Unnamed:_0', axis=1)
df_all = df_all.drop('id', axis=1)
df_all.describe().round(2)
import seaborn as sns
import matplotlib.pyplot as plt
# Check the class imbalance of the target
fig = plt.figure(figsize=(8, 5))
df_all.satisfaction.value_counts(normalize=True).plot(kind='bar', color=['darkorange', 'steelblue'], alpha=0.9, rot=0)
plt.tight_layout()
plt.show()
df_all = pd.get_dummies(df_all, columns=['Gender', 'Customer_Type', 'Type_of_Travel', 'Class'])
df_all
df_all.head().T
from sklearn.model_selection import train_test_split
train, test = train_test_split(df_all, test_size=0.2, random_state=42)
train, valid = train_test_split(train, test_size=0.2, random_state=42)
(train.shape, valid.shape, test.shape)
feats = [c for c in df_all.columns if c not in ['satisfaction']]
feats
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=200, random_state=42, n_jobs=-1)
rf.fit(train[feats], train['satisfaction']) | code |
73095137/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv')
test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv')
(df.shape, test.shape)
df_all = pd.concat([df, test])
df_all.shape
df_all.head(10).T | code |
1003657/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
directory_data = pd.read_csv('../input/directory.csv')
plt.figure(figsize=(13, 5))
directory_data['Country'].value_counts().head(15).plot(kind='bar') | code |
1003657/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
directory_data = pd.read_csv('../input/directory.csv')
directory_data.head() | code |
1003657/cell_2 | [
"text_html_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
1003657/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
directory_data = pd.read_csv('../input/directory.csv')
sns.countplot(data=directory_data, x='Brand') | code |
1003657/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
directory_data = pd.read_csv('../input/directory.csv')
directory_data['Brand'].value_counts() | code |
1003657/cell_10 | [
"text_plain_output_1.png"
] | !pip install geoplotlib | code |
1003657/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
directory_data = pd.read_csv('../input/directory.csv')
directory_data.describe() | code |
128032771/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import os
import random
import seaborn as sns
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
seed_everything(42)
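# Note: 'train' below is assumed to be loaded in an earlier cell of the source notebook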
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'],ax = axes[1][2])
plt.tight_layout()
plt.show()
plt.figure(figsize=(10, 10))
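# Mask the upper triangle (including the diagonal) so each pairwise correlation is shown only once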
mask = np.zeros_like(train.corr())
mask[np.triu_indices_from(mask)] = True
sns.heatmap(train.corr(), mask=mask, annot=True, cmap='Blues')
plt.show() | code |
128032771/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'],ax = axes[1][2])
plt.tight_layout()
plt.show()
sns.displot(data=train, x='Body_Temp', kde=True) | code |
128032771/cell_7 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
exercise = pd.read_csv('/kaggle/input/fmendesdat263xdemos/exercise.csv')
calories = pd.read_csv('/kaggle/input/fmendesdat263xdemos/calories.csv')
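# Attach the Calories target to the exercise features (rows align positionally via the default index)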
exercise['Calories_Burned'] = calories['Calories']
exercise = exercise.drop(['User_ID'], axis=1)
exercise | code |
128032771/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
fig, axes = plt.subplots(2,3, figsize = (10,10))
sns.boxplot(y = train['Age'], ax = axes[0][0])
sns.boxplot(y = train['Height'], ax = axes[0][1])
sns.boxplot(y = train['Weight'], ax = axes[0][2])
sns.boxplot(y = train['Duration'], ax = axes[1][0])
sns.boxplot(y = train['Heart_Rate'], ax = axes[1][1])
sns.boxplot(y = train['Body_Temp'],ax = axes[1][2])
plt.tight_layout()
plt.show()
sns.displot(data=train, x='Heart_Rate', kde=True) | code |