67Ayush87 commited on
Commit
a5d14c5
·
verified ·
1 Parent(s): 470de69

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +85 -20
app.py CHANGED
@@ -1,33 +1,98 @@
1
  import streamlit as st
2
  import os
3
  from langchain_community.chat_models import ChatHuggingFace
4
- from langchain_community.llms import HuggingFaceHub
5
- from langchain_core.messages import HumanMessage, SystemMessage
6
 
7
- # ✅ Load your secret token from Hugging Face Space secrets
8
- hf_token = os.getenv("Data_science") # Make sure "Data_science" is set in Space secrets
9
  os.environ["HUGGINGFACEHUB_API_KEY"] = hf_token
10
  os.environ["HF_TOKEN"] = hf_token
11
 
12
- # ✅ Initialize the LLM with your token
13
- llm = HuggingFaceHub(
14
  repo_id="meta-llama/Llama-3.2-3B-Instruct",
15
- huggingfacehub_api_token=hf_token, # <-- use hf_token here
16
- model_kwargs={"temperature": 0.6, "max_new_tokens": 100}
 
 
17
  )
18
 
19
- chat_model = ChatHuggingFace(llm=llm)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
- # ✅ Streamlit UI
22
- st.title("🧠 LLaMA Chat (Data Science Mentor)")
23
 
24
- question = st.text_input("Ask any question about data science topics:")
 
 
 
 
 
 
25
 
26
- if st.button("Ask") and question.strip():
27
- messages = [
28
- SystemMessage(content="You are a data science mentor."),
29
- HumanMessage(content=question)
30
- ]
31
- response = chat_model.invoke(messages)
32
- st.write("### Mentor's Response:")
33
- st.write(response.content)
 
"""Streamlit app: a subject-specific Data Science mentor chatbot.

Wraps a Hugging Face Inference Endpoint (Llama 3.2 3B Instruct, served via
the "nebius" provider) in a LangChain chat model and presents a simple
Streamlit UI: pick an experience level and a subject, then chat with the
mentor. Conversation history is kept in ``st.session_state`` and reset
whenever the subject changes.
"""

import os

import streamlit as st
from langchain_community.chat_models import ChatHuggingFace
from langchain_community.llms import HuggingFaceEndpoint
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

# --- Hugging Face token setup ---
# The token is stored in the Space secret named "Data_science".
hf_token = os.getenv("Data_science")
if not hf_token:
    # Fail fast with a readable message instead of crashing with a
    # TypeError when assigning None into os.environ below.
    st.error("❌ Missing Hugging Face token: set the 'Data_science' secret in your Space settings.")
    st.stop()

# HUGGINGFACEHUB_API_TOKEN is the variable langchain/huggingface_hub actually
# read; HF_TOKEN is the huggingface_hub fallback. The original *_API_KEY name
# is kept for backward compatibility with anything else that may read it.
os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token
os.environ["HUGGINGFACEHUB_API_KEY"] = hf_token
os.environ["HF_TOKEN"] = hf_token

# --- Model initialization ---
# Sampling/generation parameters belong on the endpoint; the chat wrapper
# below only needs the LLM itself.
model = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-3.2-3B-Instruct",
    provider="nebius",
    temperature=0.6,
    max_new_tokens=300,
    task="conversational",
)

chat_model = ChatHuggingFace(llm=model)

# --- Streamlit styles ---
st.markdown("""
<style>
.subject-btn {
    display: inline-block;
    margin: 0.3em;
}
.output-box {
    background-color: #f9f9f9;
    border-radius: 10px;
    padding: 20px;
    margin-top: 20px;
    box-shadow: 0 2px 6px rgba(0, 0, 0, 0.05);
}
</style>
""", unsafe_allow_html=True)

# --- Session state ---
# message_history holds the LangChain message objects for the current chat;
# selected_subject drives the system prompt and the input label.
if "message_history" not in st.session_state:
    st.session_state.message_history = []

if "selected_subject" not in st.session_state:
    st.session_state.selected_subject = "Python"

# --- UI Header ---
st.title("🎓 Data Science Mentor")
st.markdown("Ask subject-specific questions and get guidance based on your experience level.")

# --- Experience level ---
experience = st.selectbox("👤 Select your experience level:", ["Beginner", "Intermediate", "Expert"])

# --- Subject buttons ---
st.markdown("### 📚 Choose a Subject:")
cols = st.columns(4)
subjects = ["Python", "SQL", "Power BI", "Statistics", "Machine Learning", "Deep Learning", "Generative AI"]
for i, subject in enumerate(subjects):
    if cols[i % 4].button(subject):
        st.session_state.selected_subject = subject
        st.session_state.message_history = []  # Reset chat on subject change

# --- System Message ---
# Rebuilt every run; only appended when the history is empty (fresh chat).
system_prompt = f"""
You are a highly knowledgeable data science mentor specialized in {st.session_state.selected_subject}.
Your job is to guide a {experience.lower()} learner with clear, concise, and actionable advice.
Explain concepts, best practices, and answer questions with patience and professionalism.
If relevant, include example code, use-cases, or tips.
"""
if not st.session_state.message_history:
    st.session_state.message_history.append(SystemMessage(content=system_prompt))

# --- Chat Input ---
user_question = st.text_input(f"💬 Ask your {st.session_state.selected_subject} question:")

if st.button("Ask Mentor"):
    if user_question.strip():
        with st.spinner("Thinking..."):
            st.session_state.message_history.append(HumanMessage(content=user_question))
            try:
                response = chat_model.invoke(st.session_state.message_history)
                st.session_state.message_history.append(AIMessage(content=response.content))

                st.markdown('<div class="output-box">', unsafe_allow_html=True)
                st.markdown("### 🧠 Mentor's Response:")
                st.markdown(response.content)
                st.markdown("</div>", unsafe_allow_html=True)
            except Exception as e:
                # Surface endpoint/auth failures to the user instead of a stack trace.
                st.error(f"❌ Error: {e}")
    else:
        st.warning("⚠️ Please enter a question before submitting.")