Update pages/python.py
pages/python.py  +20 -32  CHANGED
@@ -3,31 +3,22 @@ import os
 from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
 from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
 
-
-
+# Load API token from environment
 hf = os.getenv('Data_science')
 os.environ['HUGGINGFACEHUB_API_TOKEN'] = hf
 os.environ['HF_TOKEN'] = hf
+
 # --- Config ---
-st.set_page_config(page_title="
-st.title("
+st.set_page_config(page_title="Python Mentor Chat", layout="centered")
+st.title("🐍 Python Mentor Chat")
 
 # --- Sidebar for selections ---
 st.sidebar.title("Mentor Preferences")
-
-
-exp = st.sidebar.selectbox("Select experience:", exp1)
-
-# Map experience to label
-experience_map = {
-    '<1': 'New bie mentor',
-    '1': '1', '2': '2', '3': '3', '4': '4', '5': '5',
-    '5+': 'Professional'
-}
-experience_label = experience_map[exp]
+exp_options = ['Beginner', 'Intermediate', 'Experienced']
+exp = st.sidebar.selectbox("Select your experience level:", exp_options)
 
 # --- Initialize Chat Model ---
-deep_seek_skeleton = HuggingFaceEndpoint(
+python_model_skeleton = HuggingFaceEndpoint(
     repo_id='meta-llama/Llama-3.2-3B-Instruct',
     provider='sambanova',
     temperature=0.7,
@@ -35,8 +26,8 @@ deep_seek_skeleton = HuggingFaceEndpoint(
     task='conversational'
 )
 
-deep_seek = ChatHuggingFace(
-    llm=
+python_mentor = ChatHuggingFace(
+    llm=python_model_skeleton,
     repo_id='meta-llama/Llama-3.2-3B-Instruct',
     provider='sambanova',
     temperature=0.7,
@@ -45,8 +36,8 @@ deep_seek = ChatHuggingFace(
 )
 
 # --- Session State ---
-if "
-    st.session_state.
+if "chat_history_python" not in st.session_state:
+    st.session_state.chat_history_python = []
 
 # --- Chat Form ---
 with st.form(key="chat_form"):
@@ -55,21 +46,18 @@ with st.form(key="chat_form"):
 
 # --- Chat Logic ---
 if submit and user_input:
-
-
-
-
+    system_prompt = (
+        f"You are a Python mentor with {exp.lower()} experience level. "
+        f"Answer only Python-related questions in a very friendly tone and under 150 words. "
+        f"If the question is not about Python, politely say it's out of scope."
+    )
     messages = [SystemMessage(content=system_prompt), HumanMessage(content=user_input)]
-
-
-    result = deep_seek.invoke(messages)
-
-    # Append to history
-    st.session_state.chat_history.append((user_input, result.content))
+    result = python_mentor.invoke(messages)
+    st.session_state.chat_history_python.append((user_input, result.content))
 
 # --- Display Chat History ---
 st.subheader("🗨️ Chat History")
-for
+for user, bot in st.session_state.chat_history_python:
     st.markdown(f"**You:** {user}")
     st.markdown(f"**Mentor:** {bot}")
-    st.markdown("---")
+    st.markdown("---")
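For reference, below is a minimal sketch of how pages/python.py plausibly reads once this commit is applied. Only the changed hunks are visible above, so everything the diff hides is an assumption rather than repository content: the top-of-file imports (import os is implied by the first hunk header, import streamlit as st by the st.* calls), the extra keyword arguments on the hidden context lines inside the two model constructors, and the text input / submit button inside the form (the names user_input and submit do appear in the diff, but the widget calls and labels here are guesses).

# Sketch of pages/python.py after this commit. Lines not shown in the diff
# are assumptions and are marked as such below.
import os               # assumed: implied by the hunk header "import os"
import streamlit as st  # assumed: required by the st.* calls in the diff

from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage

# Load API token from environment (assumes the 'Data_science' secret is set)
hf = os.getenv('Data_science')
os.environ['HUGGINGFACEHUB_API_TOKEN'] = hf
os.environ['HF_TOKEN'] = hf

# --- Config ---
st.set_page_config(page_title="Python Mentor Chat", layout="centered")
st.title("🐍 Python Mentor Chat")

# --- Sidebar for selections ---
st.sidebar.title("Mentor Preferences")
exp_options = ['Beginner', 'Intermediate', 'Experienced']
exp = st.sidebar.selectbox("Select your experience level:", exp_options)

# --- Initialize Chat Model ---
python_model_skeleton = HuggingFaceEndpoint(
    repo_id='meta-llama/Llama-3.2-3B-Instruct',
    provider='sambanova',
    temperature=0.7,
    # one more kwarg sits on a hidden context line here (not shown in the diff)
    task='conversational'
)

python_mentor = ChatHuggingFace(
    llm=python_model_skeleton,
    # the commit also keeps repo_id/provider/temperature here as unchanged
    # context; omitted in this sketch since the endpoint above defines them
)

# --- Session State ---
if "chat_history_python" not in st.session_state:
    st.session_state.chat_history_python = []

# --- Chat Form ---
with st.form(key="chat_form"):
    # assumed widgets: the diff hides these two lines
    user_input = st.text_input("Ask your Python question:")
    submit = st.form_submit_button("Send")

# --- Chat Logic ---
if submit and user_input:
    system_prompt = (
        f"You are a Python mentor with {exp.lower()} experience level. "
        f"Answer only Python-related questions in a very friendly tone and under 150 words. "
        f"If the question is not about Python, politely say it's out of scope."
    )
    messages = [SystemMessage(content=system_prompt), HumanMessage(content=user_input)]
    result = python_mentor.invoke(messages)
    st.session_state.chat_history_python.append((user_input, result.content))

# --- Display Chat History ---
st.subheader("🗨️ Chat History")
for user, bot in st.session_state.chat_history_python:
    st.markdown(f"**You:** {user}")
    st.markdown(f"**Mentor:** {bot}")
    st.markdown("---")

The sketch passes only llm= to ChatHuggingFace; the wrapped HuggingFaceEndpoint already carries the repo_id, provider, and temperature settings, which is why the duplicated arguments kept by the commit are left out here.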