varalakshmi55 committed
Commit 2093cd1 · verified · 1 Parent(s): 829c8a0

Delete testpklfiles.py

Files changed (1):
  1. testpklfiles.py +0 -79
testpklfiles.py DELETED
@@ -1,79 +0,0 @@
- # generate_pickles.py
-
- import pandas as pd
- import numpy as np
- import pickle
- import streamlit as st
- from sklearn.model_selection import train_test_split
- from sklearn.preprocessing import LabelEncoder, StandardScaler
- from xgboost import XGBClassifier
-
- st.title("📦 Data Preprocessing and Pickle Saver")
-
- @st.cache_data
- def load_sampled_data():
-     df3 = pd.read_parquet("train_series.parquet", columns=['series_id', 'step', 'anglez', 'enmo'])
-     df4 = pd.read_parquet("test_series.parquet", columns=['series_id', 'step', 'anglez', 'enmo'])
-     df2 = pd.read_csv("train_events.csv")
-
-     # Sample safely based on available data
-     df3_sample = df3.sample(n=min(5_000_000, len(df3)), random_state=42)
-     df4_sample = df4.sample(n=min(1_000_000, len(df4)), random_state=42)
-
-     return df3_sample, df4_sample, df2
-
- # Load
- df3, df4, df2 = load_sampled_data()
- df = pd.concat([df3, df4], axis=0, ignore_index=True)
- merged_df = pd.merge(df, df2, on=['series_id', 'step'], how='inner')
-
- # Rename timestamp columns if they exist
- if 'timestamp_x' in merged_df.columns:
-     merged_df.rename(columns={'timestamp_x': 'sensor_timestamp'}, inplace=True)
- if 'timestamp_y' in merged_df.columns:
-     merged_df.rename(columns={'timestamp_y': 'event_timestamp'}, inplace=True)
-
- # Drop only columns that exist
- required_cols = ['night', 'event', 'event_timestamp']
- existing_cols = [col for col in required_cols if col in merged_df.columns]
- merged_df.dropna(subset=existing_cols, inplace=True)
- merged_df.reset_index(drop=True, inplace=True)
-
- # Convert timestamps and calculate sleep duration
- if 'event_timestamp' in merged_df.columns and 'sensor_timestamp' in merged_df.columns:
-     merged_df['event_timestamp'] = pd.to_datetime(merged_df['event_timestamp'], errors='coerce', utc=True)
-     merged_df['sensor_timestamp'] = pd.to_datetime(merged_df['sensor_timestamp'], errors='coerce', utc=True)
-     merged_df['sleep_duration_hrs'] = (merged_df['sensor_timestamp'] - merged_df['event_timestamp']).dt.total_seconds() / 3600
-     merged_df.dropna(subset=['sensor_timestamp', 'event_timestamp', 'sleep_duration_hrs'], inplace=True)
-
- # Encode
- le = LabelEncoder()
- merged_df['series_id'] = le.fit_transform(merged_df['series_id'])
- merged_df['event'] = le.fit_transform(merged_df['event'])
-
- # Drop columns with string or datetime values
- drop_cols = ['sensor_timestamp', 'event_timestamp', 'night', 'step', 'sleep_duration_hrs', 'series_id']
- df_cleaned = merged_df.drop(columns=[col for col in drop_cols if col in merged_df.columns])
-
- # Ensure only numeric features in X
- X = df_cleaned.drop('event', axis=1).select_dtypes(include=[np.number])
- y = merged_df['event']
-
- # Split and scale
- X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=27)
- scaler = StandardScaler()
- X_train_scaled = scaler.fit_transform(X_train)
- X_test_scaled = scaler.transform(X_test)
-
- # Train model
- model = XGBClassifier(n_estimators=100, max_depth=3, learning_rate=0.1, eval_metric='logloss')
- model.fit(X_train_scaled, y_train)
-
- # Save pickles
- with open("model.pkl", "wb") as f: pickle.dump(model, f)
- with open("scaler.pkl", "wb") as f: pickle.dump(scaler, f)
- with open("label_encoder.pkl", "wb") as f: pickle.dump(le, f)
- with open("X_test.pkl", "wb") as f: pickle.dump(X_test_scaled, f)
- with open("y_test.pkl", "wb") as f: pickle.dump(y_test, f)
-
- st.success("✅ Pickle files saved successfully.")
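
For context on what this deletion removes: the script trained an XGBClassifier and pickled five artifacts (model.pkl, scaler.pkl, label_encoder.pkl, X_test.pkl, y_test.pkl). A minimal sketch of how a companion script could load them back and score the model — the file name load_and_eval.py and the accuracy_score check are assumptions for illustration, not part of this repo:

    # load_and_eval.py — hypothetical companion script, not part of this repo.
    import pickle
    from sklearn.metrics import accuracy_score  # assumed metric; the deleted script saved no evaluation

    def load_pickle(path):
        # Unpickle one artifact from disk.
        with open(path, "rb") as f:
            return pickle.load(f)

    model = load_pickle("model.pkl")                   # trained XGBClassifier
    scaler = load_pickle("scaler.pkl")                 # fitted StandardScaler, for scaling any new raw features
    label_encoder = load_pickle("label_encoder.pkl")   # encoder as last fit (on 'event')
    X_test_scaled = load_pickle("X_test.pkl")          # saved after scaling, so usable directly
    y_test = load_pickle("y_test.pkl")

    y_pred = model.predict(X_test_scaled)
    print(f"Accuracy on the held-out split: {accuracy_score(y_test, y_pred):.3f}")

Note that X_test.pkl was written after StandardScaler.transform, so it feeds the model directly; scaler.pkl is only needed for data that has not been scaled yet.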
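
One caveat worth recording alongside the deletion: the script reused a single LabelEncoder, fitting it on series_id and then refitting it on event, so the pickled label_encoder.pkl carried only the event mapping. A sketch of a variant that keeps both mappings, assuming that was the intent (the toy DataFrame and its values are illustrative stand-ins for the script's merged_df):

    import pandas as pd
    from sklearn.preprocessing import LabelEncoder

    # Hypothetical stand-in for the script's merged_df, just to make the snippet runnable.
    merged_df = pd.DataFrame({'series_id': ['a', 'a', 'b'],
                              'event': ['onset', 'wakeup', 'onset']})

    # One encoder per column, so each mapping survives pickling.
    series_le = LabelEncoder()
    event_le = LabelEncoder()
    merged_df['series_id'] = series_le.fit_transform(merged_df['series_id'])
    merged_df['event'] = event_le.fit_transform(merged_df['event'])
    # Pickle event_le (and series_le if needed) in place of the single shared 'le'.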