Update app.py
app.py
CHANGED
@@ -1,4 +1,76 @@
 import streamlit as st
+from transformers import pipeline
+import torch
 
-st.
-
+st.set_page_config(page_title="Data Science Mentor", layout="wide")
+
+# Cache models to avoid reloading
+@st.cache_resource
+def load_model(topic):
+    # Select model based on topic
+    if topic == "Python":
+        return pipeline("text-generation", model="tiiuae/falcon-7b-instruct", device=0 if torch.cuda.is_available() else -1)
+    elif topic == "GenAI":
+        return pipeline("text2text-generation", model="google/flan-t5-large", device=0 if torch.cuda.is_available() else -1)
+    elif topic == "Statistics":
+        return pipeline("text-generation", model="databricks/dolly-v2-3b", device=0 if torch.cuda.is_available() else -1)
+    elif topic == "SQL":
+        return pipeline("text2text-generation", model="google/flan-t5-base", device=0 if torch.cuda.is_available() else -1)
+    else:
+        # Fallback for Power BI, ML, DL
+        return pipeline("text-generation", model="tiiuae/falcon-7b-instruct", device=0 if torch.cuda.is_available() else -1)
+
+def generate_answer(model, topic, level, question):
+    prompt = f"You are a {level} level mentor in {topic}. Answer the following question in detail:\n{question}"
+
+    if model.task == "text-generation":  # exact match: "text-generation" is a substring of "text2text-generation"
+        output = model(prompt, max_new_tokens=256, do_sample=True, top_k=50)
+        answer = output[0]['generated_text']
+    else:
+        output = model(prompt, max_length=256)
+        answer = output[0]['generated_text']
+
+    # Remove prompt from answer if echoed (causal LMs return prompt + completion)
+    if answer.lower().startswith(prompt.lower()):
+        answer = answer[len(prompt):].strip()
+    return answer
+
+# --- Streamlit UI ---
+
+st.title("🤖 Data Science Mentor")
+
+with st.sidebar:
+    st.header("Configure Your Mentor")
+    topic = st.radio("Select Topic:", ["Python", "GenAI", "Statistics", "Power BI", "SQL", "Machine Learning", "Deep Learning"])
+    level = st.radio("Select Experience Level:", ["Beginner", "Intermediate", "Advanced"])
+
+# Load model for topic
+model = load_model(topic)
+
+if "chat_history" not in st.session_state:
+    st.session_state.chat_history = []
+
+st.subheader(f"Ask your {topic} question:")
+user_input = st.text_area("Type your question here:", height=100)
+
+if st.button("Get Answer"):
+    if user_input.strip() == "":
+        st.warning("Please enter a question.")
+    else:
+        with st.spinner("Mentor is thinking..."):
+            answer = generate_answer(model, topic, level, user_input)
+        st.session_state.chat_history.append(("You", user_input))
+        st.session_state.chat_history.append(("Mentor", answer))
+
+# Display chat history
+if st.session_state.chat_history:
+    for i in range(0, len(st.session_state.chat_history), 2):
+        user_msg = st.session_state.chat_history[i][1]
+        mentor_msg = st.session_state.chat_history[i+1][1] if i+1 < len(st.session_state.chat_history) else ""
+        st.markdown(f"**You:** {user_msg}")
+        st.markdown(f"**Mentor:** {mentor_msg}")
+        st.markdown("---")
+
+if st.button("Clear Chat"):
+    st.session_state.chat_history = []
+    st.rerun()  # rerun immediately so the cleared history is not shown stale
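
The branch taken in generate_answer is decided by model.task, which can be smoke-tested outside Streamlit. A minimal sketch, assuming the same transformers/torch environment as the Space and using google/flan-t5-base, the smallest of the models selected in load_model above:

    # Smoke test for the task dispatch in generate_answer (run outside Streamlit).
    from transformers import pipeline
    import torch

    device = 0 if torch.cuda.is_available() else -1
    model = pipeline("text2text-generation", model="google/flan-t5-base", device=device)

    prompt = ("You are a Beginner level mentor in SQL. "
              "Answer the following question in detail:\nWhat does a LEFT JOIN return?")
    print(model.task)  # "text2text-generation", so the max_length branch is taken
    output = model(prompt, max_length=256)
    print(output[0]["generated_text"])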
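
One caveat: st.cache_resource keeps a separate pipeline per distinct topic argument, so browsing several topics accumulates multiple multi-gigabyte models in memory. A hedged variant, using the decorator's standard max_entries parameter to evict the oldest entry when the limit is reached:

    # Keep at most one model cached; switching topics evicts the previous one.
    @st.cache_resource(max_entries=1)
    def load_model(topic):
        ...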