Schema:
  path              string       (length 13–17)
  screenshot_names  sequence     (length 1–873)
  code              string       (length 0–40.4k)
  cell_type         string class (1 value: "code")
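For orientation, a minimal sketch of loading a dump like this one, assuming it has been exported as a JSON-lines file named cells.jsonl with one record per cell (the filename and export format are assumptions, not part of the dataset):

import pandas as pd

# One row per notebook cell: notebook path, screenshot names, flattened source, cell type.
df = pd.read_json('cells.jsonl', lines=True)  # 'cells.jsonl' is a hypothetical export name
print(df.columns.tolist())       # expect ['path', 'screenshot_names', 'code', 'cell_type']
print(df['cell_type'].unique())  # ['code'] — the single class noted in the schema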
121154415/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
import matplotlib.pyplot as plt
from copy import deepcopy
import warnings
warnings.filterwarnings('ignore')
import random
random.seed(42)
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
121154415/cell_32
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/'
df_proteins = pd.read_csv(f'{datapath}train_proteins.csv')
df_peptides = pd.read_csv(f'{datapath}train_peptides.csv')
df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv')
data = df_proteins.merge(df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='left')
data = data.merge(df_cd, on=['visit_id', 'visit_month', 'patient_id'], how='left')
data
color = 'RdYlGn'
data['upd23b_clinical_state_on_medication'].unique()
code
121154415/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/'
df_proteins = pd.read_csv(f'{datapath}train_proteins.csv')
df_peptides = pd.read_csv(f'{datapath}train_peptides.csv')
df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv')
df_proteins.info()
code
121154415/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/'
df_proteins = pd.read_csv(f'{datapath}train_proteins.csv')
df_peptides = pd.read_csv(f'{datapath}train_peptides.csv')
df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv')
data = df_proteins.merge(df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='left')
data = data.merge(df_cd, on=['visit_id', 'visit_month', 'patient_id'], how='left')
data
code
121154415/cell_38
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/'
df_proteins = pd.read_csv(f'{datapath}train_proteins.csv')
df_peptides = pd.read_csv(f'{datapath}train_peptides.csv')
df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv')
data = df_proteins.merge(df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='left')
data = data.merge(df_cd, on=['visit_id', 'visit_month', 'patient_id'], how='left')
data
color = 'RdYlGn'
data.loc[data.upd23b_clinical_state_on_medication == 'On', 'upd23b_clinical_state_on_medication'] = 1
data.loc[data.upd23b_clinical_state_on_medication == 'Off', 'upd23b_clinical_state_on_medication'] = 0
data['upd23b_clinical_state_on_medication'].value_counts()
code
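An aside, not from the notebook: the two .loc writes above binarize the medication flag in place. An equivalent one-liner (a sketch; assumes the data frame built in the cell above) uses Series.map, which sends anything outside the mapping to NaN — matching this column, whose only other value is NaN:

# Hypothetical alternative to the two .loc writes above:
data['upd23b_clinical_state_on_medication'] = (
    data['upd23b_clinical_state_on_medication'].map({'On': 1, 'Off': 0})
)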
121154415/cell_35
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/'
df_proteins = pd.read_csv(f'{datapath}train_proteins.csv')
df_peptides = pd.read_csv(f'{datapath}train_peptides.csv')
df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv')
data = df_proteins.merge(df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='left')
data = data.merge(df_cd, on=['visit_id', 'visit_month', 'patient_id'], how='left')
data
color = 'RdYlGn'
data.loc[data.upd23b_clinical_state_on_medication == 'On', 'upd23b_clinical_state_on_medication'] = 1
data.loc[data.upd23b_clinical_state_on_medication == 'Off', 'upd23b_clinical_state_on_medication'] = 0
data['upd23b_clinical_state_on_medication'].unique()
code
121154415/cell_24
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/'
df_proteins = pd.read_csv(f'{datapath}train_proteins.csv')
df_peptides = pd.read_csv(f'{datapath}train_peptides.csv')
df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv')
data = df_proteins.merge(df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='left')
data = data.merge(df_cd, on=['visit_id', 'visit_month', 'patient_id'], how='left')
data
ppp = (pd.DataFrame(df_proteins.groupby('UniProt').patient_id.nunique())
       .rename(columns={'patient_id': 'count_patient'})
       .reset_index()
       .sort_values('count_patient', ascending=False))
ppp.head(10)
code
121154415/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/'
df_proteins = pd.read_csv(f'{datapath}train_proteins.csv')
df_peptides = pd.read_csv(f'{datapath}train_peptides.csv')
df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv')
df_cd.head()
code
121154415/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/'
df_proteins = pd.read_csv(f'{datapath}train_proteins.csv')
df_peptides = pd.read_csv(f'{datapath}train_peptides.csv')
df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv')
df_cd.info()
code
121154415/cell_27
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/'
df_proteins = pd.read_csv(f'{datapath}train_proteins.csv')
df_peptides = pd.read_csv(f'{datapath}train_peptides.csv')
df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv')
data = df_proteins.merge(df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='left')
data = data.merge(df_cd, on=['visit_id', 'visit_month', 'patient_id'], how='left')
data
color = 'RdYlGn'
ppp = (pd.DataFrame(df_proteins.groupby('UniProt').patient_id.nunique())
       .rename(columns={'patient_id': 'count_patient'})
       .reset_index()
       .sort_values('count_patient', ascending=False))
sns.histplot(ppp)
code
121154415/cell_37
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/'
df_proteins = pd.read_csv(f'{datapath}train_proteins.csv')
df_peptides = pd.read_csv(f'{datapath}train_peptides.csv')
df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv')
data = df_proteins.merge(df_peptides, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='left')
data = data.merge(df_cd, on=['visit_id', 'visit_month', 'patient_id'], how='left')
data
color = 'RdYlGn'
data.loc[data.upd23b_clinical_state_on_medication == 'On', 'upd23b_clinical_state_on_medication'] = 1
data.loc[data.upd23b_clinical_state_on_medication == 'Off', 'upd23b_clinical_state_on_medication'] = 0
data['upd23b_clinical_state_on_medication'].mode()
code
121154415/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/'
df_proteins = pd.read_csv(f'{datapath}train_proteins.csv')
df_peptides = pd.read_csv(f'{datapath}train_peptides.csv')
df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv')
df_proteins.head()
code
121154415/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
datapath = '/kaggle/input/amp-parkinsons-disease-progression-prediction/'
df_proteins = pd.read_csv(f'{datapath}train_proteins.csv')
df_peptides = pd.read_csv(f'{datapath}train_peptides.csv')
df_cd = pd.read_csv(f'{datapath}train_clinical_data.csv')
print('df_proteins: ', df_proteins.shape)
print('df_peptides: ', df_peptides.shape)
print('df_cd: ', df_cd.shape)
code
122248445/cell_13
[ "text_plain_output_1.png" ]
!pip install scikit-learn
code
122248445/cell_9
[ "text_plain_output_1.png" ]
import os
import shutil
MAIN_DIR = '/kaggle/input/bach-breast-cancer-histology-images/ICIAR2018_BACH_Challenge/ICIAR2018_BACH_Challenge/Photos'
BASE_DIR = '/kaggle/working/bach-train'
TRAIN_DIR = os.path.join(BASE_DIR, 'training')
VAL_DIR = os.path.join(BASE_DIR, 'validation')
TEST_DIR = os.path.join(BASE_DIR, 'test')
if os.path.exists(BASE_DIR):
    shutil.rmtree(BASE_DIR)
os.makedirs(BASE_DIR)
os.makedirs(TRAIN_DIR)
os.makedirs(VAL_DIR)
os.makedirs(TEST_DIR)
bc_types = [file for file in os.listdir(MAIN_DIR) if os.path.isdir(os.path.join(MAIN_DIR, file))]
print('Types: ', bc_types)
bc_dict = {'InSitu': 2, 'Benign': 1, 'Normal': 0, 'Invasive': 3}
print('Encode: ', bc_dict)
dict_bc = {0: 'Normal', 1: 'Benign', 2: 'InSitu', 3: 'Invasive'}
print('Decode: ', dict_bc)
code
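A small aside, not in the notebook: the decode mapping can be derived from the encode mapping instead of being written out by hand, which keeps the two from drifting apart:

bc_dict = {'InSitu': 2, 'Benign': 1, 'Normal': 0, 'Invasive': 3}
# Invert the encode dict to get the decode dict: {2: 'InSitu', 1: 'Benign', 0: 'Normal', 3: 'Invasive'}
dict_bc = {v: k for k, v in bc_dict.items()}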
122248445/cell_4
[ "text_plain_output_1.png" ]
import tensorflow as tf
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    print('Device:', tpu.master())
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except:
    strategy = tf.distribute.get_strategy()
print('Number of replicas:', strategy.num_replicas_in_sync)
code
122248445/cell_2
[ "text_html_output_1.png" ]
!apt-get update && apt-get install -y python3-opencv
!pip install opencv-python
!pip install seaborn
import cv2
import seaborn as sns
code
122248445/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/init-bach-eda/bach_data.csv', index_col=0)
df.head()
code
122248445/cell_15
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import shutil
MAIN_DIR = '/kaggle/input/bach-breast-cancer-histology-images/ICIAR2018_BACH_Challenge/ICIAR2018_BACH_Challenge/Photos'
BASE_DIR = '/kaggle/working/bach-train'
TRAIN_DIR = os.path.join(BASE_DIR, 'training')
VAL_DIR = os.path.join(BASE_DIR, 'validation')
TEST_DIR = os.path.join(BASE_DIR, 'test')
if os.path.exists(BASE_DIR):
    shutil.rmtree(BASE_DIR)
os.makedirs(BASE_DIR)
os.makedirs(TRAIN_DIR)
os.makedirs(VAL_DIR)
os.makedirs(TEST_DIR)
bc_types = [file for file in os.listdir(MAIN_DIR) if os.path.isdir(os.path.join(MAIN_DIR, file))]
bc_dict = {'InSitu': 2, 'Benign': 1, 'Normal': 0, 'Invasive': 3}
dict_bc = {0: 'Normal', 1: 'Benign', 2: 'InSitu', 3: 'Invasive'}
for bc in bc_types:
    train_type_folder = os.path.join(TRAIN_DIR, bc)
    val_type_folder = os.path.join(VAL_DIR, bc)
    test_type_folder = os.path.join(TEST_DIR, bc)
    os.makedirs(train_type_folder)
    os.makedirs(val_type_folder)
    os.makedirs(test_type_folder)
df = pd.read_csv('/kaggle/input/init-bach-eda/bach_data.csv', index_col=0)
df['type'] = df['type'].map(bc_dict)
from sklearn.model_selection import train_test_split
type_dict = dict({0: list(), 1: list(), 2: list(), 3: list()})
for i in range(len(df)):
    path = df['path'][i]
    type_ = df['type'][i]
    if os.path.getsize(path):
        type_dict[type_].append(path)
(len(type_dict[0]), len(type_dict[1]), len(type_dict[2]), len(type_dict[3]))
code
122248445/cell_3
[ "text_plain_output_1.png" ]
import tensorflow as tf
from tensorflow.keras.preprocessing.image import img_to_array, ImageDataGenerator, load_img
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Dense, MaxPooling2D, Dropout, Flatten, BatchNormalization
from tensorflow.keras.activations import softmax
from tensorflow.keras.optimizers import SGD, Adamax
code
122248445/cell_17
[ "text_html_output_1.png" ]
# Relies on the x_train/x_val/x_test splits produced in an earlier (unexported) cell.
print('Training size: ', len(x_train), len(y_train))
print('Val size: ', len(x_val), len(y_val))
print('Test size: ', len(x_test), len(y_test))
code
122248445/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import pandas as pd
import shutil
MAIN_DIR = '/kaggle/input/bach-breast-cancer-histology-images/ICIAR2018_BACH_Challenge/ICIAR2018_BACH_Challenge/Photos'
BASE_DIR = '/kaggle/working/bach-train'
TRAIN_DIR = os.path.join(BASE_DIR, 'training')
VAL_DIR = os.path.join(BASE_DIR, 'validation')
TEST_DIR = os.path.join(BASE_DIR, 'test')
if os.path.exists(BASE_DIR):
    shutil.rmtree(BASE_DIR)
os.makedirs(BASE_DIR)
os.makedirs(TRAIN_DIR)
os.makedirs(VAL_DIR)
os.makedirs(TEST_DIR)
bc_types = [file for file in os.listdir(MAIN_DIR) if os.path.isdir(os.path.join(MAIN_DIR, file))]
bc_dict = {'InSitu': 2, 'Benign': 1, 'Normal': 0, 'Invasive': 3}
dict_bc = {0: 'Normal', 1: 'Benign', 2: 'InSitu', 3: 'Invasive'}
df = pd.read_csv('/kaggle/input/init-bach-eda/bach_data.csv', index_col=0)
df['type'] = df['type'].map(bc_dict)
df.head()
code
122254156/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df = pd.read_csv('nba_games.csv', index_col=0)
df
code
89138263/cell_20
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
train_df = train_df.rename(columns=lambda x: ''.join(x.split('_')))
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
test_df = test_df.rename(columns=lambda x: ''.join(x.split('_')))

def reduce_mem_usage(df, verbose=True):
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    if verbose:
        print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
    return df

def grab_col_names(dataframe, cat_th=61, car_th=74):
    """
    It gives the names of categorical, numerical and categorical but cardinal variables in the data set.
    Note: Categorical variables with numerical appearance are also included in categorical variables.

    Parameters
    ------
    dataframe: dataframe
        The dataframe from which variable names are to be retrieved
    cat_th: int, optional
        Class threshold value for numeric but categorical variables
    car_th: int, optional
        Class threshold for categorical but cardinal variables

    Returns
    ------
    cat_cols: list
        Categorical variable list
    num_cols: list
        Numerical variable list
    cat_but_car: list
        Categorical-looking cardinal variable list

    Examples
    ------
    import seaborn as sns
    df = sns.load_dataset("iris")
    print(grab_col_names(df))

    Notes
    ------
    cat_cols + num_cols + cat_but_car = the total number of variables.
    num_but_cat is inside cat_cols.
    The sum of the 3 returned lists equals the total number of variables:
    cat_cols + num_cols + cat_but_car = number of variables.
    """
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == 'O']
    num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != 'O']
    cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and dataframe[col].dtypes == 'O']
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != 'O']
    num_cols = [col for col in num_cols if col not in num_but_cat]
    return (cat_cols, num_cols, cat_but_car)

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    if na_name:
        return (na_columns, missing_df)

na_cols, missing_df = missing_values_table(train_df, True)
missing_df.reset_index(inplace=True)
code
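For intuition, a minimal sketch (toy data, not from the competition) of the lossless downcasting that reduce_mem_usage above performs column by column:

import numpy as np
import pandas as pd

df_demo = pd.DataFrame({'a': np.arange(100, dtype=np.int64)})
before = df_demo.memory_usage().sum()
# Values 0..99 fit inside int8's range (-128..127), so the cast loses nothing.
df_demo['a'] = df_demo['a'].astype(np.int8)
after = df_demo.memory_usage().sum()
print(before, '->', after, 'bytes')

The same min/max check drives the function above: each numeric column is cast to the narrowest dtype whose representable range still contains all of its values.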
89138263/cell_40
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
train_df = train_df.rename(columns=lambda x: ''.join(x.split('_')))
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
test_df = test_df.rename(columns=lambda x: ''.join(x.split('_')))
for i in train_df.columns:
    if train_df[i].dtypes == 'int64' or train_df[i].dtypes == 'float64':
        train_df[i].fillna(train_df[i].mean(), inplace=True)
for i in train_df.columns:
    if train_df[i].dtypes == 'object':
        train_df[i].fillna(train_df[i].mode()[0], inplace=True)
train_df.isnull().sum()
code
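A sketch of an equivalent vectorized fill (assumes the train_df built in the cell above; select_dtypes('number') is slightly broader than the explicit int64/float64 check, so this matches in spirit rather than byte for byte):

# Fill numeric columns with their column means, object columns with their column modes.
num = train_df.select_dtypes(include='number').columns
train_df[num] = train_df[num].fillna(train_df[num].mean())
obj = train_df.select_dtypes(include='object').columns
train_df[obj] = train_df[obj].fillna(train_df[obj].mode().iloc[0])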
89138263/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
train_df = train_df.rename(columns=lambda x: ''.join(x.split('_')))
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
test_df = test_df.rename(columns=lambda x: ''.join(x.split('_')))

def grab_col_names(dataframe, cat_th=61, car_th=74):
    """
    It gives the names of categorical, numerical and categorical but cardinal variables in the data set.
    Note: Categorical variables with numerical appearance are also included in categorical variables.

    Parameters
    ------
    dataframe: dataframe
        The dataframe from which variable names are to be retrieved
    cat_th: int, optional
        Class threshold value for numeric but categorical variables
    car_th: int, optional
        Class threshold for categorical but cardinal variables

    Returns
    ------
    cat_cols: list
        Categorical variable list
    num_cols: list
        Numerical variable list
    cat_but_car: list
        Categorical-looking cardinal variable list

    Examples
    ------
    import seaborn as sns
    df = sns.load_dataset("iris")
    print(grab_col_names(df))

    Notes
    ------
    cat_cols + num_cols + cat_but_car = the total number of variables.
    num_but_cat is inside cat_cols.
    The sum of the 3 returned lists equals the total number of variables:
    cat_cols + num_cols + cat_but_car = number of variables.
    """
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == 'O']
    num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != 'O']
    cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and dataframe[col].dtypes == 'O']
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != 'O']
    num_cols = [col for col in num_cols if col not in num_but_cat]
    return (cat_cols, num_cols, cat_but_car)

for i in train_df.columns:
    if train_df[i].dtypes == 'int64' or train_df[i].dtypes == 'float64':
        train_df[i].fillna(train_df[i].mean(), inplace=True)
for i in train_df.columns:
    if train_df[i].dtypes == 'object':
        train_df[i].fillna(train_df[i].mode()[0], inplace=True)
cat_cols, num_cols, cat_but_car = grab_col_names(train_df)
code
89138263/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, cross_validate
from sklearn.preprocessing import MinMaxScaler, LabelEncoder, StandardScaler, RobustScaler
from sklearn.impute import KNNImputer
from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.exceptions import ConvergenceWarning
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier, LocalOutlierFactor
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.svm import SVR, SVC
from sklearn.ensemble import (RandomForestClassifier, RandomForestRegressor, GradientBoostingClassifier,
                              GradientBoostingRegressor, VotingClassifier, VotingRegressor, AdaBoostClassifier)
from sklearn.metrics import (accuracy_score, recall_score, precision_score, f1_score, roc_auc_score,
                             r2_score, mean_squared_error, mean_absolute_error, classification_report)
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
code
89138263/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89138263/cell_47
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
train_df = train_df.rename(columns=lambda x: ''.join(x.split('_')))
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
test_df = test_df.rename(columns=lambda x: ''.join(x.split('_')))

def reduce_mem_usage(df, verbose=True):
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    if verbose:
        print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
    return df

def grab_col_names(dataframe, cat_th=61, car_th=74):
    """
    It gives the names of categorical, numerical and categorical but cardinal variables in the data set.
    Note: Categorical variables with numerical appearance are also included in categorical variables.

    Parameters
    ------
    dataframe: dataframe
        The dataframe from which variable names are to be retrieved
    cat_th: int, optional
        Class threshold value for numeric but categorical variables
    car_th: int, optional
        Class threshold for categorical but cardinal variables

    Returns
    ------
    cat_cols: list
        Categorical variable list
    num_cols: list
        Numerical variable list
    cat_but_car: list
        Categorical-looking cardinal variable list

    Examples
    ------
    import seaborn as sns
    df = sns.load_dataset("iris")
    print(grab_col_names(df))

    Notes
    ------
    cat_cols + num_cols + cat_but_car = the total number of variables.
    num_but_cat is inside cat_cols.
    The sum of the 3 returned lists equals the total number of variables:
    cat_cols + num_cols + cat_but_car = number of variables.
    """
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == 'O']
    num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != 'O']
    cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and dataframe[col].dtypes == 'O']
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != 'O']
    num_cols = [col for col in num_cols if col not in num_but_cat]
    return (cat_cols, num_cols, cat_but_car)

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    if na_name:
        return (na_columns, missing_df)

for i in train_df.columns:
    if train_df[i].dtypes == 'int64' or train_df[i].dtypes == 'float64':
        train_df[i].fillna(train_df[i].mean(), inplace=True)
for i in test_df.columns:
    if test_df[i].dtypes == 'int64' or test_df[i].dtypes == 'float64':
        test_df[i].fillna(test_df[i].mean(), inplace=True)
for i in train_df.columns:
    if train_df[i].dtypes == 'object':
        train_df[i].fillna(train_df[i].mode()[0], inplace=True)
for i in test_df.columns:
    if test_df[i].dtypes == 'object':
        test_df[i].fillna(test_df[i].mode()[0], inplace=True)
train_df.isnull().sum()
num_cols2 = num_cols
num_cols2.remove('TransactionID')
train_df = pd.get_dummies(train_df[cat_cols + num_cols2], drop_first=True)
cat_cols.remove('isFraud')
test_df = pd.get_dummies(test_df[cat_cols + num_cols2], drop_first=True)
test_df.shape
code
89138263/cell_46
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
train_df = train_df.rename(columns=lambda x: ''.join(x.split('_')))
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
test_df = test_df.rename(columns=lambda x: ''.join(x.split('_')))

def reduce_mem_usage(df, verbose=True):
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    if verbose:
        print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
    return df

def grab_col_names(dataframe, cat_th=61, car_th=74):
    """
    It gives the names of categorical, numerical and categorical but cardinal variables in the data set.
    Note: Categorical variables with numerical appearance are also included in categorical variables.

    Parameters
    ------
    dataframe: dataframe
        The dataframe from which variable names are to be retrieved
    cat_th: int, optional
        Class threshold value for numeric but categorical variables
    car_th: int, optional
        Class threshold for categorical but cardinal variables

    Returns
    ------
    cat_cols: list
        Categorical variable list
    num_cols: list
        Numerical variable list
    cat_but_car: list
        Categorical-looking cardinal variable list

    Examples
    ------
    import seaborn as sns
    df = sns.load_dataset("iris")
    print(grab_col_names(df))

    Notes
    ------
    cat_cols + num_cols + cat_but_car = the total number of variables.
    num_but_cat is inside cat_cols.
    The sum of the 3 returned lists equals the total number of variables:
    cat_cols + num_cols + cat_but_car = number of variables.
    """
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == 'O']
    num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != 'O']
    cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and dataframe[col].dtypes == 'O']
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != 'O']
    num_cols = [col for col in num_cols if col not in num_but_cat]
    return (cat_cols, num_cols, cat_but_car)

def missing_values_table(dataframe, na_name=False):
    na_columns = [col for col in dataframe.columns if dataframe[col].isnull().sum() > 0]
    n_miss = dataframe[na_columns].isnull().sum().sort_values(ascending=False)
    ratio = (dataframe[na_columns].isnull().sum() / dataframe.shape[0] * 100).sort_values(ascending=False)
    missing_df = pd.concat([n_miss, np.round(ratio, 2)], axis=1, keys=['n_miss', 'ratio'])
    if na_name:
        return (na_columns, missing_df)

for i in train_df.columns:
    if train_df[i].dtypes == 'int64' or train_df[i].dtypes == 'float64':
        train_df[i].fillna(train_df[i].mean(), inplace=True)
for i in train_df.columns:
    if train_df[i].dtypes == 'object':
        train_df[i].fillna(train_df[i].mode()[0], inplace=True)
train_df.isnull().sum()
num_cols2 = num_cols
num_cols2.remove('TransactionID')
train_df = pd.get_dummies(train_df[cat_cols + num_cols2], drop_first=True)
train_df.shape
code
89138263/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
warnings.simplefilter('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
train_transaction = pd.read_csv('../input/ieee-fraud-detection/train_transaction.csv')
train_identity = pd.read_csv('../input/ieee-fraud-detection/train_identity.csv')
test_transaction = pd.read_csv('../input/ieee-fraud-detection/test_transaction.csv')
test_identity = pd.read_csv('../input/ieee-fraud-detection/test_identity.csv')
sample_submission = pd.read_csv('../input/ieee-fraud-detection/sample_submission.csv')
train_df = train_transaction.merge(train_identity, how='left', on='TransactionID')
test_df = test_transaction.merge(test_identity, how='left', on='TransactionID')
train_df = train_df.rename(columns=lambda x: ''.join(x.split('_')))
test_df = test_df.rename(columns=lambda x: '_'.join(x.split('-')))
test_df = test_df.rename(columns=lambda x: ''.join(x.split('_')))

def grab_col_names(dataframe, cat_th=61, car_th=74):
    """
    It gives the names of categorical, numerical and categorical but cardinal variables in the data set.
    Note: Categorical variables with numerical appearance are also included in categorical variables.

    Parameters
    ------
    dataframe: dataframe
        The dataframe from which variable names are to be retrieved
    cat_th: int, optional
        Class threshold value for numeric but categorical variables
    car_th: int, optional
        Class threshold for categorical but cardinal variables

    Returns
    ------
    cat_cols: list
        Categorical variable list
    num_cols: list
        Numerical variable list
    cat_but_car: list
        Categorical-looking cardinal variable list

    Examples
    ------
    import seaborn as sns
    df = sns.load_dataset("iris")
    print(grab_col_names(df))

    Notes
    ------
    cat_cols + num_cols + cat_but_car = the total number of variables.
    num_but_cat is inside cat_cols.
    The sum of the 3 returned lists equals the total number of variables:
    cat_cols + num_cols + cat_but_car = number of variables.
    """
    cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == 'O']
    num_but_cat = [col for col in dataframe.columns if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != 'O']
    cat_but_car = [col for col in dataframe.columns if dataframe[col].nunique() > car_th and dataframe[col].dtypes == 'O']
    cat_cols = cat_cols + num_but_cat
    cat_cols = [col for col in cat_cols if col not in cat_but_car]
    num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != 'O']
    num_cols = [col for col in num_cols if col not in num_but_cat]
    return (cat_cols, num_cols, cat_but_car)

cat_cols, num_cols, cat_but_car = grab_col_names(train_df)
code
2033671/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error

main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
home_sale_price = data.SalePrice
column_interest = ['BsmtFinSF1', 'SalePrice']
two_column_data = data[column_interest]
y = data.SalePrice
Cost_predictors = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = data[Cost_predictors]
price_model = DecisionTreeRegressor()
price_model.fit(X, y)
predicted_home_price = price_model.predict(X)
mean_absolute_error(y, predicted_home_price)
code
2033671/cell_6
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

def get_mae(max_leaf_nodes, predictors_train, predictors_val, targ_train, targ_val):
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(predictors_train, targ_train)
    preds_val = model.predict(predictors_val)
    mae = mean_absolute_error(targ_val, preds_val)
    return mae

for max_leaf_nodes in [5, 50, 500, 5000]:
    my_mae = get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y)
    print('max_leaf_nodes: %d \t\t Mean_Absolute_error: %d' % (max_leaf_nodes, my_mae))
code
2033671/cell_2
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
print(data.columns)
home_sale_price = data.SalePrice
print(home_sale_price.head())
column_interest = ['BsmtFinSF1', 'SalePrice']
two_column_data = data[column_interest]
two_column_data.describe()
code
2033671/cell_7
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

forest_model = RandomForestRegressor()
forest_model.fit(train_X, train_y)
price_preds = forest_model.predict(val_X)
print(mean_absolute_error(val_y, price_preds))
code
2033671/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.tree import DecisionTreeRegressor

main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
home_sale_price = data.SalePrice
column_interest = ['BsmtFinSF1', 'SalePrice']
two_column_data = data[column_interest]
y = data.SalePrice
Cost_predictors = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = data[Cost_predictors]
price_model = DecisionTreeRegressor()
price_model.fit(X, y)
print('Making predictions for the following houses:')
print(X.head())
print('The predictions are:')
print(price_model.predict(X.head()))
code
2033671/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor

main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
home_sale_price = data.SalePrice
column_interest = ['BsmtFinSF1', 'SalePrice']
two_column_data = data[column_interest]
y = data.SalePrice
Cost_predictors = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = data[Cost_predictors]
price_model = DecisionTreeRegressor()
price_model.fit(X, y)
predicted_home_price = price_model.predict(X)
mean_absolute_error(y, predicted_home_price)
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=0)
price_model.fit(train_X, train_y)
val_predictions = price_model.predict(val_X)
print(mean_absolute_error(val_y, val_predictions))
code
16134883/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
print('Showing Meta Data :')
data.info()
code
16134883/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()

def continous_data(i):
    if dataset[i].dtype != 'object':
        plt.clf()

def categorical_data(i):
    dataset[i].value_counts().plot(kind='bar')

j_1 = ['Channel', 'Region']
for k in j_1:
    categorical_data(i=k)
plt.show()
code
16134883/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
code
16134883/cell_34
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()
log_data = np.log(dataset[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']].copy())
list(log_data.columns)
code
16134883/cell_30
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()

def continous_data(i):
    if dataset[i].dtype != 'object':
        plt.clf()

sns.set()
j = ['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']
for k in j:
    continous_data(i=k)
log_data = np.log(dataset[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']].copy())

def categorical_data(i):
    pass

j_1 = ['Channel', 'Region']
for k in j_1:
    categorical_data(i=k)
dataset.corr()

def scatterplot(i, j):
    pass

scatterplot(i='Milk', j='Detergents_Paper')
code
16134883/cell_20
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()

def continous_data(i):
    if dataset[i].dtype != 'object':
        plt.clf()

sns.set()
j = ['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']
for k in j:
    continous_data(i=k)
code
16134883/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
type(data)
code
16134883/cell_29
[ "image_output_11.png", "text_plain_output_5.png", "text_plain_output_4.png", "image_output_5.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_8.png", "image_output_6.png", "image_output_12.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()

def continous_data(i):
    if dataset[i].dtype != 'object':
        plt.clf()

sns.set()
j = ['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']
for k in j:
    continous_data(i=k)
log_data = np.log(dataset[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']].copy())

def categorical_data(i):
    pass

j_1 = ['Channel', 'Region']
for k in j_1:
    categorical_data(i=k)
dataset.corr()

def scatterplot(i, j):
    pass

scatterplot(i='Milk', j='Grocery')
code
16134883/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()
dataset.corr()
code
16134883/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
code
16134883/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()
dataset.head()
code
16134883/cell_32
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
pd.isnull(data).sum()
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()

def continous_data(i):
    if dataset[i].dtype != 'object':
        plt.clf()

sns.set()
j = ['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']
for k in j:
    continous_data(i=k)
log_data = np.log(dataset[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']].copy())

def categorical_data(i):
    pass

j_1 = ['Channel', 'Region']
for k in j_1:
    categorical_data(i=k)
dataset.corr()

def scatterplot(i, j):
    pass

def categorical_multi(i, j):
    pd.crosstab(dataset[i], dataset[j]).plot(kind='bar')
    plt.show()
    print(pd.crosstab(dataset[i], dataset[j]))

categorical_multi(i='Channel', j='Region')
code
16134883/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
print('Descriptive Statistics of our Data:')
data.describe().T
code
16134883/cell_38
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()
log_data = np.log(dataset[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']].copy())
list(log_data.columns)
for k in list(log_data.columns):
    IQR = np.percentile(log_data[k], 75) - np.percentile(log_data[k], 25)
    Outlier_top = np.percentile(log_data[k], 75) + 1.5 * IQR
    Outlier_bottom = np.percentile(log_data[k], 25) - 1.5 * IQR
    log_data[k] = np.where(log_data[k] > Outlier_top, Outlier_top, log_data[k])
    log_data[k] = np.where(log_data[k] < Outlier_bottom, Outlier_bottom, log_data[k])
dataset1 = log_data.copy()
list(dataset1.columns)
code
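For intuition, a tiny worked example (toy numbers, not from the dataset) of the Tukey-fence clipping the loop above applies to each column:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 100.0])
q1, q3 = np.percentile(x, 25), np.percentile(x, 75)
iqr = q3 - q1                   # 4.0 - 2.0 = 2.0
top = q3 + 1.5 * iqr            # 7.0
bottom = q1 - 1.5 * iqr         # -1.0
print(np.clip(x, bottom, top))  # [1. 2. 3. 4. 7.] — the outlier is capped at the fence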
16134883/cell_3
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import seaborn as sns
import matplotlib.pyplot as plt
import os
print(os.listdir('../input'))
code
16134883/cell_31
[ "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()

def continous_data(i):
    if dataset[i].dtype != 'object':
        plt.clf()

sns.set()
j = ['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']
for k in j:
    continous_data(i=k)
log_data = np.log(dataset[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']].copy())

def categorical_data(i):
    pass

j_1 = ['Channel', 'Region']
for k in j_1:
    categorical_data(i=k)
dataset.corr()

def scatterplot(i, j):
    pass

scatterplot(i='Detergents_Paper', j='Grocery')
code
16134883/cell_24
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()
log_data = np.log(dataset[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']].copy())
log_data.head()
code
16134883/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()
dataset.head()
code
16134883/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
pd.isnull(data).sum()
code
16134883/cell_27
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()

def continous_data(i):
    if dataset[i].dtype != 'object':
        plt.clf()

sns.set()
j = ['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']
for k in j:
    continous_data(i=k)

def categorical_data(i):
    pass

j_1 = ['Channel', 'Region']
for k in j_1:
    categorical_data(i=k)
dataset.corr()
print('Correlation Heat map of the data')
plt.figure(figsize=(10, 6))
sns.heatmap(dataset.corr(), annot=True, fmt='.2f', vmin=-1, vmax=1, cmap='Spectral')
plt.show()
code
16134883/cell_37
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
pd.isnull(data).sum()
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()

def continous_data(i):
    if dataset[i].dtype != 'object':
        plt.clf()

sns.set()
j = ['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']
for k in j:
    continous_data(i=k)
log_data = np.log(dataset[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']].copy())

def categorical_data(i):
    pass

j_1 = ['Channel', 'Region']
for k in j_1:
    categorical_data(i=k)
dataset.corr()

def scatterplot(i, j):
    pass

def categorical_multi(i, j):
    pass

categorical_multi(i='Channel', j='Region')
list(log_data.columns)
for k in list(log_data.columns):
    IQR = np.percentile(log_data[k], 75) - np.percentile(log_data[k], 25)
    Outlier_top = np.percentile(log_data[k], 75) + 1.5 * IQR
    Outlier_bottom = np.percentile(log_data[k], 25) - 1.5 * IQR
    log_data[k] = np.where(log_data[k] > Outlier_top, Outlier_top, log_data[k])
    log_data[k] = np.where(log_data[k] < Outlier_bottom, Outlier_bottom, log_data[k])

def continous_data(i):
    if log_data[i].dtype != 'object':
        log_data[i].plot.kde()
        plt.clf()

for k in j:
    continous_data(i=k)
sns.pairplot(log_data, diag_kind='kde')
code
16134883/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
data.Region.value_counts()
data.Channel.value_counts()
code
16134883/cell_5
[ "image_output_11.png", "text_plain_output_5.png", "text_plain_output_4.png", "image_output_5.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_8.png", "image_output_6.png", "image_output_12.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
import pandas as pd
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.head()
code
16134883/cell_36
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/Wholesale customers data.csv')
data.shape
data.describe().T
pd.isnull(data).sum()
data.Region.value_counts()
data.Channel.value_counts()
dataset = data.copy()

def continous_data(i):
    if dataset[i].dtype != 'object':
        plt.clf()

sns.set()
j = ['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']
for k in j:
    continous_data(i=k)
log_data = np.log(dataset[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']].copy())

def categorical_data(i):
    pass

j_1 = ['Channel', 'Region']
for k in j_1:
    categorical_data(i=k)
dataset.corr()

def scatterplot(i, j):
    pass

def categorical_multi(i, j):
    pass

categorical_multi(i='Channel', j='Region')
list(log_data.columns)
for k in list(log_data.columns):
    IQR = np.percentile(log_data[k], 75) - np.percentile(log_data[k], 25)
    Outlier_top = np.percentile(log_data[k], 75) + 1.5 * IQR
    Outlier_bottom = np.percentile(log_data[k], 25) - 1.5 * IQR
    log_data[k] = np.where(log_data[k] > Outlier_top, Outlier_top, log_data[k])
    log_data[k] = np.where(log_data[k] < Outlier_bottom, Outlier_bottom, log_data[k])

def continous_data(i):
    if log_data[i].dtype != 'object':
        print('--' * 60)
        sns.boxplot(log_data[i])
        plt.title('Boxplot of ' + str(i))
        plt.show()
        plt.title('histogram of ' + str(i))
        log_data[i].plot.kde()
        plt.show()
        plt.clf()

for k in j:
    continous_data(i=k)
code
2011240/cell_42
[ "text_plain_output_1.png" ]
from pandas import Series, DataFrame
import pandas as pd
import seaborn as sns
titanic = pd.read_csv('../input/train.csv')
fig = sns.FacetGrid(titanic, hue='Sex', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='person', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='Pclass', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
deck = titanic['Cabin'].dropna()
levels = []
for level in deck:
    levels.append(level[0])
cabin = DataFrame(levels)
cabin.columns = ['Cabin']
cabin = cabin[cabin.Cabin != 'T']
titanic['Alone'] = titanic.SibSp + titanic.Parch
sns.factorplot('Alone', data=titanic, kind='count', palette='hot')
code
2011240/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
titanic = pd.read_csv('../input/train.csv')
fig = sns.FacetGrid(titanic, hue='Sex', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='person', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='Pclass', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
code
2011240/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')

def split(passenger):
    age, sex = passenger
    if age < 16:
        return 'child'
    else:
        return sex

titanic['person'] = titanic[['Age', 'Sex']].apply(split, axis=1)
titanic[0:10]
code
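An equivalent vectorized form of the row-wise apply above — a sketch, not the notebook's own code. NaN ages compare False against 16 and therefore fall through to the recorded sex, matching the apply version:

import numpy as np
import pandas as pd

titanic = pd.read_csv('../input/train.csv')
# 'child' where Age < 16, otherwise the value already in Sex.
titanic['person'] = np.where(titanic['Age'] < 16, 'child', titanic['Sex'])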
2011240/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
titanic = pd.read_csv('../input/train.csv')
sns.factorplot('Sex', data=titanic, hue='Pclass', kind='count')
code
2011240/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
deck = titanic['Cabin'].dropna()
deck.head()
code
2011240/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
titanic.info()
code
2011240/cell_34
[ "text_plain_output_1.png", "image_output_1.png" ]
from pandas import Series, DataFrame
import pandas as pd
import seaborn as sns
titanic = pd.read_csv('../input/train.csv')
fig = sns.FacetGrid(titanic, hue='Sex', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='person', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='Pclass', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
deck = titanic['Cabin'].dropna()
levels = []
for level in deck:
    levels.append(level[0])
cabin = DataFrame(levels)
cabin.columns = ['Cabin']
cabin = cabin[cabin.Cabin != 'T']
sns.factorplot('Embarked', hue='Pclass', data=titanic, kind='count', palette='ocean')
code
2011240/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
titanic.head()
code
2011240/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
from pandas import Series, DataFrame
import pandas as pd
import seaborn as sns
titanic = pd.read_csv('../input/train.csv')
# Restore the 'person' column built in cell_13 (child if under 16, otherwise sex);
# the hue='person' grid below fails without it.
titanic['person'] = titanic[['Age', 'Sex']].apply(lambda p: 'child' if p['Age'] < 16 else p['Sex'], axis=1)
fig = sns.FacetGrid(titanic, hue='Sex', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='person', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='Pclass', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
deck = titanic['Cabin'].dropna()
levels = []
for level in deck:
    levels.append(level[0])
cabin = DataFrame(levels)
cabin.columns = ['Cabin']
cabin = cabin[cabin.Cabin != 'T']
sns.factorplot('Cabin', data=cabin, palette='rainbow', kind='count')
code
2011240/cell_33
[ "text_plain_output_1.png" ]
from pandas import Series, DataFrame
import pandas as pd
import seaborn as sns
titanic = pd.read_csv('../input/train.csv')
# Restore the 'person' column built in cell_13 (child if under 16, otherwise sex);
# the hue='person' grid below fails without it.
titanic['person'] = titanic[['Age', 'Sex']].apply(lambda p: 'child' if p['Age'] < 16 else p['Sex'], axis=1)
fig = sns.FacetGrid(titanic, hue='Sex', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='person', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='Pclass', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
deck = titanic['Cabin'].dropna()
levels = []
for level in deck:
    levels.append(level[0])
cabin = DataFrame(levels)
cabin.columns = ['Cabin']
cabin = cabin[cabin.Cabin != 'T']
sns.factorplot('Embarked', data=titanic, kind='count', color='blue')
code
2011240/cell_44
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
titanic['Alone'] = titanic.SibSp + titanic.Parch
titanic['Survivor'] = titanic.Survived.map({0: 'No', 1: 'Yes'})
titanic.head()
code
2011240/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
titanic = pd.read_csv('../input/train.csv')
# Restore the 'person' column built in cell_13 (child if under 16, otherwise sex);
# the hue='person' grid below fails without it.
titanic['person'] = titanic[['Age', 'Sex']].apply(lambda p: 'child' if p['Age'] < 16 else p['Sex'], axis=1)
fig = sns.FacetGrid(titanic, hue='Sex', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='person', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
code
2011240/cell_40
[ "text_html_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
titanic['Alone'] = titanic.SibSp + titanic.Parch
# Assign through .loc on the frame itself; the original chained
# titanic['Alone'].loc[...] = ... pattern raises SettingWithCopyWarning
# and may silently fail to write.
titanic.loc[titanic['Alone'] > 0, 'Alone'] = 'With Family'
titanic.loc[titanic['Alone'] == 0, 'Alone'] = 'Alone'
code
2011240/cell_39
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
titanic['Alone'] = titanic.SibSp + titanic.Parch
titanic['Alone']
code
2011240/cell_41
[ "text_html_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
titanic['Alone'] = titanic.SibSp + titanic.Parch
titanic.head()
code
2011240/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
titanic = pd.read_csv('../input/train.csv')
fig = sns.FacetGrid(titanic, hue='Sex', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
code
2011240/cell_45
[ "text_html_output_1.png" ]
from pandas import Series, DataFrame
import pandas as pd
import seaborn as sns
titanic = pd.read_csv('../input/train.csv')
# Restore the 'person' column built in cell_13 (child if under 16, otherwise sex);
# the hue='person' grid below fails without it.
titanic['person'] = titanic[['Age', 'Sex']].apply(lambda p: 'child' if p['Age'] < 16 else p['Sex'], axis=1)
fig = sns.FacetGrid(titanic, hue='Sex', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='person', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='Pclass', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
deck = titanic['Cabin'].dropna()
levels = []
for level in deck:
    levels.append(level[0])
cabin = DataFrame(levels)
cabin.columns = ['Cabin']
cabin = cabin[cabin.Cabin != 'T']
titanic['Alone'] = titanic.SibSp + titanic.Parch
titanic['Survivor'] = titanic.Survived.map({0: 'No', 1: 'Yes'})
sns.factorplot('Survivor', data=titanic, kind='count')
code
2011240/cell_18
[ "text_html_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
# Restore the 'person' column built in cell_13 (child if under 16, otherwise sex);
# the value_counts call below fails without it.
titanic['person'] = titanic[['Age', 'Sex']].apply(lambda p: 'child' if p['Age'] < 16 else p['Sex'], axis=1)
titanic['person'].value_counts()
code
2011240/cell_32
[ "text_html_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
titanic.head()
code
2011240/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
titanic = pd.read_csv('../input/train.csv')
sns.factorplot('Sex', data=titanic, kind='count', color='red')
code
2011240/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
titanic['Age'].hist(bins=70, color='blue')
code
2011240/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
plt.hist(titanic['Age'].dropna(), bins=70)
code
2011240/cell_38
[ "text_html_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
titanic['Alone'] = titanic.SibSp + titanic.Parch
titanic.head()
code
2011240/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
titanic.head()
code
2011240/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
titanic['Age'].mean()
code
2011240/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns
titanic = pd.read_csv('../input/train.csv')
# Restore the 'person' column built in cell_13 (child if under 16, otherwise sex);
# the hue='person' plot below fails without it.
titanic['person'] = titanic[['Age', 'Sex']].apply(lambda p: 'child' if p['Age'] < 16 else p['Sex'], axis=1)
sns.factorplot('Pclass', hue='person', data=titanic, kind='count')
code
2011240/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns
titanic = pd.read_csv('../input/train.csv')
sns.factorplot('Pclass', data=titanic, hue='Sex', kind='count')
code
2011240/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
from pandas import Series, DataFrame
import pandas as pd
import seaborn as sns
titanic = pd.read_csv('../input/train.csv')
# Restore the 'person' column built in cell_13 (child if under 16, otherwise sex);
# the hue='person' grid below fails without it.
titanic['person'] = titanic[['Age', 'Sex']].apply(lambda p: 'child' if p['Age'] < 16 else p['Sex'], axis=1)
fig = sns.FacetGrid(titanic, hue='Sex', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='person', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
fig = sns.FacetGrid(titanic, hue='Pclass', aspect=4)
fig.map(sns.kdeplot, 'Age', shade=True)
oldest = titanic['Age'].max()
fig.set(xlim=(0, oldest))
fig.add_legend()
deck = titanic['Cabin'].dropna()
levels = []
for level in deck:
    levels.append(level[0])
cabin = DataFrame(levels)
cabin.columns = ['Cabin']
sns.factorplot('Cabin', data=cabin, palette='winter_d', kind='count')
code
2011240/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
titanic.describe()
code
2011240/cell_36
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
titanic = pd.read_csv('../input/train.csv')
titanic.head()
code
128024882/cell_13
[ "text_plain_output_1.png" ]
from statsmodels.sandbox.regression import gmm
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/comp-macro/data.csv')
data = data.dropna()
instrument_df = data[['cons_lagged1', 'interest_lagged_1', 'lagged_cons_growth']]
endog_df = data[['int_forward', 'cons_forward', 'PCE']]
endog, instrument = map(np.asarray, [endog_df, instrument_df])

def moment_consumption1(params, endog):
    # Consumption Euler-equation residual: beta * (1 + r_{t+1}) * (c_{t+1}/c_t)**(-gamma) - 1
    beta, gamma = params
    r_forw1, c_forw1, c = endog.T
    err = beta * (1 + r_forw1) * np.power(c_forw1 / c, -gamma) - 1
    return err

dependant = np.ones(endog.shape[0])
mod1 = gmm.NonlinearIVGMM(dependant, endog, instrument, moment_consumption1, k_moms=4)
I = np.eye(3)
res_two_way = mod1.fit([1, -1], maxiter=2, inv_weights=I)
res_iterated = mod1.fit([1, -1], maxiter=100, inv_weights=I, weights_method='hac', wargs={'maxlag': 4})
start = (0, 0)
CUE_results = mod1.fitgmm_cu(start, optim_method='bfgs', optim_args=None)
print(CUE_results)
code
128024882/cell_9
[ "text_plain_output_1.png" ]
from statsmodels.sandbox.regression import gmm
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/comp-macro/data.csv')
data = data.dropna()
instrument_df = data[['cons_lagged1', 'interest_lagged_1', 'lagged_cons_growth']]
endog_df = data[['int_forward', 'cons_forward', 'PCE']]
endog, instrument = map(np.asarray, [endog_df, instrument_df])

def moment_consumption1(params, endog):
    beta, gamma = params
    r_forw1, c_forw1, c = endog.T
    err = beta * (1 + r_forw1) * np.power(c_forw1 / c, -gamma) - 1
    return err

dependant = np.ones(endog.shape[0])
mod1 = gmm.NonlinearIVGMM(dependant, endog, instrument, moment_consumption1, k_moms=4)
I = np.eye(3)
res_two_way = mod1.fit([1, -1], maxiter=2, inv_weights=I)
code
128024882/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/comp-macro/data.csv')
data = data.dropna()
data.head()
code
128024882/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128024882/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/comp-macro/data.csv')
data = data.dropna()
data.columns
code
128024882/cell_10
[ "text_html_output_1.png" ]
from statsmodels.sandbox.regression import gmm
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/comp-macro/data.csv')
data = data.dropna()
instrument_df = data[['cons_lagged1', 'interest_lagged_1', 'lagged_cons_growth']]
endog_df = data[['int_forward', 'cons_forward', 'PCE']]
endog, instrument = map(np.asarray, [endog_df, instrument_df])

def moment_consumption1(params, endog):
    beta, gamma = params
    r_forw1, c_forw1, c = endog.T
    err = beta * (1 + r_forw1) * np.power(c_forw1 / c, -gamma) - 1
    return err

dependant = np.ones(endog.shape[0])
mod1 = gmm.NonlinearIVGMM(dependant, endog, instrument, moment_consumption1, k_moms=4)
I = np.eye(3)
res_two_way = mod1.fit([1, -1], maxiter=2, inv_weights=I)
print(res_two_way.summary(yname='Euler Eq', xname=['discount', 'CRRA']))
code
128024882/cell_12
[ "text_plain_output_1.png" ]
from statsmodels.sandbox.regression import gmm
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/comp-macro/data.csv')
data = data.dropna()
instrument_df = data[['cons_lagged1', 'interest_lagged_1', 'lagged_cons_growth']]
endog_df = data[['int_forward', 'cons_forward', 'PCE']]
endog, instrument = map(np.asarray, [endog_df, instrument_df])

def moment_consumption1(params, endog):
    beta, gamma = params
    r_forw1, c_forw1, c = endog.T
    err = beta * (1 + r_forw1) * np.power(c_forw1 / c, -gamma) - 1
    return err

dependant = np.ones(endog.shape[0])
mod1 = gmm.NonlinearIVGMM(dependant, endog, instrument, moment_consumption1, k_moms=4)
I = np.eye(3)
res_two_way = mod1.fit([1, -1], maxiter=2, inv_weights=I)
res_iterated = mod1.fit([1, -1], maxiter=100, inv_weights=I, weights_method='hac', wargs={'maxlag': 4})
print('\n\n')
print(res_iterated.summary(yname='Euler Eq', xname=['discount', 'CRRA']))
code
74061207/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/sales-store-product-details/Salesstore.csv')
df.isna().sum()
plot_1 = sns.histplot(data=df, x='Ship_Mode')
plt.show()
plot_2 = sns.histplot(data=df, x='Order_Priority')
plt.show()
plot_3 = sns.histplot(data=df, x='Customer_Segment')
plt.show()
plot_4 = sns.histplot(data=df, x='Product_Category')
plt.show()
plot_5 = sns.histplot(data=df, x='Product_Container')
plt.show()
plot_6 = sns.barplot(data=df, x='Order_Priority', y='Profit', hue='Ship_Mode')
plt.show()
plot_7 = sns.barplot(data=df, x='Region', y='Profit', hue='Ship_Mode')
plt.xticks(rotation=45)
plt.show()
plot_8 = sns.barplot(data=df, x='Region', y='Sales', hue='Ship_Mode')
plt.xticks(rotation=45)
plt.show()
plot_9 = sns.barplot(data=df, x='Region', y='Profit', hue='Customer_Segment')
plt.xticks(rotation=45)
plt.show()
plot_10 = sns.barplot(data=df, x='Region', y='Profit', hue='Product_Category')
plt.xticks(rotation=45)
plt.show()
plot_11 = sns.lineplot(data=df, x='Order_Quantity', y='Sales')
plt.show()
plot_12 = sns.lmplot(data=df, x='Order_Quantity', y='Profit')
plt.show()
plot_14 = sns.barplot(data=df, x='Product_Category', y='Profit', hue='Product_Container')
plt.show()
plot_15 = sns.regplot(data=df, x='Sales', y='Profit')  # renamed from a second 'plot_11', which rebound the lineplot handle
plt.show()
code
74061207/cell_4
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/sales-store-product-details/Salesstore.csv')
df.isna().sum()
code