Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,33 +1,98 @@
|
|
1 |
import streamlit as st
|
2 |
import os
|
3 |
from langchain_community.chat_models import ChatHuggingFace
|
4 |
-
from langchain_community.llms import
|
5 |
-
from langchain_core.messages import HumanMessage, SystemMessage
|
6 |
|
7 |
-
#
|
8 |
-
hf_token = os.getenv("Data_science")
|
9 |
os.environ["HUGGINGFACEHUB_API_KEY"] = hf_token
|
10 |
os.environ["HF_TOKEN"] = hf_token
|
11 |
|
12 |
-
#
|
13 |
-
|
14 |
repo_id="meta-llama/Llama-3.2-3B-Instruct",
|
15 |
-
|
16 |
-
|
|
|
|
|
17 |
)
|
18 |
|
19 |
-
chat_model = ChatHuggingFace(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
|
21 |
-
#
|
22 |
-
st.
|
23 |
|
24 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
|
|
import streamlit as st
import os
from langchain_community.chat_models import ChatHuggingFace
from langchain_community.llms import HuggingFaceEndpoint
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage

# --- Hugging Face token setup ---
# The Space secret is named "Data_science"; mirror it into the env vars the
# HF / LangChain clients actually read.
hf_token = os.getenv("Data_science")
if not hf_token:
    # os.environ[...] = None would raise TypeError at import time; fail with a
    # readable message instead of a stack trace.
    st.error("Hugging Face token not found: set the 'Data_science' secret/environment variable.")
    st.stop()
os.environ["HUGGINGFACEHUB_API_KEY"] = hf_token
os.environ["HF_TOKEN"] = hf_token

# --- Model initialization ---
# All generation parameters live on the endpoint; ChatHuggingFace only wraps it.
model = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-3.2-3B-Instruct",
    provider="nebius",
    temperature=0.6,
    max_new_tokens=300,
    task="conversational"
)

# NOTE(fix): repo_id/provider/temperature/max_new_tokens/task are not
# ChatHuggingFace fields (they trigger a validation error) — pass only the llm.
chat_model = ChatHuggingFace(llm=model)

# --- Streamlit styles ---
st.markdown("""
<style>
.subject-btn {
    display: inline-block;
    margin: 0.3em;
}
.output-box {
    background-color: #f9f9f9;
    border-radius: 10px;
    padding: 20px;
    margin-top: 20px;
    box-shadow: 0 2px 6px rgba(0, 0, 0, 0.05);
}
</style>
""", unsafe_allow_html=True)

# --- Session state ---
# message_history holds LangChain message objects (System/Human/AI) across reruns.
if "message_history" not in st.session_state:
    st.session_state.message_history = []

if "selected_subject" not in st.session_state:
    st.session_state.selected_subject = "Python"

# --- UI Header ---
st.title("π Data Science Mentor")
st.markdown("Ask subject-specific questions and get guidance based on your experience level.")

# --- Experience level ---
experience = st.selectbox("π€ Select your experience level:", ["Beginner", "Intermediate", "Expert"])

# --- Subject buttons ---
st.markdown("### π Choose a Subject:")
cols = st.columns(4)
subjects = ["Python", "SQL", "Power BI", "Statistics", "Machine Learning", "Deep Learning", "Generative AI"]
for i, subject in enumerate(subjects):
    if cols[i % 4].button(subject):
        st.session_state.selected_subject = subject
        st.session_state.message_history = []  # Reset chat on subject change

# --- System Message ---
system_prompt = f"""
You are a highly knowledgeable data science mentor specialized in {st.session_state.selected_subject}.
Your job is to guide a {experience.lower()} learner with clear, concise, and actionable advice.
Explain concepts, best practices, and answer questions with patience and professionalism.
If relevant, include example code, use-cases, or tips.
"""
if not st.session_state.message_history:
    st.session_state.message_history.append(SystemMessage(content=system_prompt))
else:
    # NOTE(fix): keep the system prompt in sync when the experience level
    # changes mid-conversation (slot 0 is always the SystemMessage).
    st.session_state.message_history[0] = SystemMessage(content=system_prompt)

# --- Chat Input ---
user_question = st.text_input(f"π¬ Ask your {st.session_state.selected_subject} question:")

if st.button("Ask Mentor"):
    if user_question.strip():
        with st.spinner("Thinking..."):
            st.session_state.message_history.append(HumanMessage(content=user_question))
            try:
                response = chat_model.invoke(st.session_state.message_history)
                st.session_state.message_history.append(AIMessage(content=response.content))

                st.markdown('<div class="output-box">', unsafe_allow_html=True)
                st.markdown("### π§ Mentor's Response:")
                st.markdown(response.content)
                st.markdown("</div>", unsafe_allow_html=True)
            except Exception as e:
                # Surface provider/auth errors in the UI rather than crashing the app.
                st.error(f"β Error: {e}")
    else:
        st.warning("β οΈ Please enter a question before submitting.")