import streamlit as st import numpy as np import mlxtend import seaborn as sns import matplotlib.pyplot as plt from sklearn.datasets import make_moons, make_circles, make_blobs from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense,InputLayer from tensorflow.keras.optimizers import SGD from tensorflow.keras import regularizers from mlxtend.plotting import plot_decision_regions st.markdown("""""",unsafe_allow_html=True) st.write("## A Neural Network Playground") st.sidebar.write("### Dataset Settings") n_samples = st.sidebar.number_input("No.of Samples",min_value=300,max_value=2000,step = 100,value=500) dataset_name = st.sidebar.selectbox("Dataset", ["Blobs","Moons", "Circles"]) test_ratio = st.sidebar.slider("Test Ratio", 0.1, 0.5, 0.2, 0.05) noise = st.sidebar.slider("Noise", 0.0, 0.5, 0.1, 0.01) all = int(n_samples - (n_samples*test_ratio)) batch_size = st.sidebar.slider("Batch Size",40,all) def generate_dataset(name, noise, test_ratio): if name == "Moons": x, y = make_moons(n_samples=n_samples, noise=noise, random_state=42) elif name == "Circles": x, y = make_circles(n_samples=n_samples, noise=noise, random_state=42) else: x, y = make_blobs(n_samples=n_samples, centers=2, random_state=42, cluster_std=1.5) return train_test_split(x, y, test_size=test_ratio, random_state=42) x_train, x_test, y_train, y_test = generate_dataset(dataset_name, noise, test_ratio) std = StandardScaler() x_train = std.fit_transform(x_train) x_test = std.transform(x_test) col1, col2, col3, col4, col5 = st.columns(5) epochs = col1.number_input("Epochs",min_value=100,max_value=500,step=10) lr = col2.selectbox("Learning Rate", [0.1,0.0001,0.01,0.03,1,3,10]) activation = col3.selectbox("Activation", ["sigmoid", "tanh", "relu"]) regularization = col4.selectbox("Regularization", ["None", "L1", "L2"]) reg_rate = col5.selectbox("Reg Rate",[0,0.001,0.01,0.03,0.1,1,3,10]) st.write("### Hidden Layers") if "layers" not in st.session_state: st.session_state.layers = [2] colA, colB = st.columns(2) if colA.button("➕ Add Layer"): st.session_state.layers.append(2) if colB.button("➖ Remove Layer") and len(st.session_state.layers) > 1: st.session_state.layers.pop() cola , colb = st.columns(2) with colb: layer_neurons = [] st.write("###### Configure Neurons per Layer") for i, neurons in enumerate(st.session_state.layers): n = st.slider(f"Layer {i+1} Neurons", 1, 20, neurons, 1) layer_neurons.append(n) with cola: fig, ax = plt.subplots(figsize=(4,3)) sns.scatterplot(x=x_train[:,0], y=x_train[:,1], hue=y_train, s=15) ax.set_title("Before Training") st.pyplot(fig) if st.button("🚀Start Training"): model = Sequential() if regularization == "L1": reg = regularizers.l1(reg_rate) elif regularization == "L2": reg = regularizers.l2(reg_rate) else: reg = None model.add(InputLayer(input_shape=(2,))) for n in layer_neurons[0:]: model.add(Dense(n, activation=activation, kernel_regularizer=reg)) model.add(Dense(1, activation="sigmoid")) model.compile(optimizer=SGD(learning_rate=lr), loss="binary_crossentropy", metrics=["accuracy"]) hist = model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, verbose=0, validation_data=(x_test, y_test)) col1, col2 = st.columns(2) fig1, ax1 = plt.subplots(figsize=(4,3)) plot_decision_regions(X=x_train,y=y_train,clf=model) plt.title("Decision Boundary") plt.legend() col1.pyplot(fig1) fig2, ax2 = plt.subplots(figsize=(4,3)) ax2.plot(hist.history["loss"], label="Train Loss") ax2.plot(hist.history["val_loss"], label="Val Loss") ax2.legend() ax2.set_title("Loss vs Epochs") col2.pyplot(fig2) st.write(f"##### Training Loss: {hist.history['loss'][-1]:.3f}") st.write(f"##### Validation Loss: {hist.history['val_loss'][-1]:.3f}")