67Ayush87 committed on
Commit
38d1603
·
verified ·
1 Parent(s): 4ea3c16

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +126 -25
app.py CHANGED
@@ -14,34 +14,135 @@
14
  # result = model_d.invoke(message)
15
  # print(result.content)
16
 
 
17
  import streamlit as st
18
  from langchain_community.chat_models import ChatHuggingFace
19
  from langchain_community.llms import HuggingFaceHub
20
  from langchain_core.messages import HumanMessage, SystemMessage
 
21
 
22
- # Setup API key (replace with your key or use st.secrets)
23
- import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
# SECURITY FIX: the token was previously a hard-coded literal in source.
# Read it from the environment instead; empty string keeps downstream
# HuggingFaceHub construction from raising on a missing kwarg.
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN", "")

# Load model: small instruct LLaMA with a short, mildly-creative response budget.
llm = HuggingFaceHub(
    repo_id="meta-llama/Llama-3.2-3B-Instruct",
    huggingfacehub_api_token=HF_TOKEN,
    model_kwargs={"temperature": 0.6, "max_new_tokens": 100},
)
chat_model = ChatHuggingFace(llm=llm)

# Streamlit UI
st.title("πŸ§ͺ Simple LLaMA Chat Test")

question = st.text_input("Ask a gaming-related question:", "Give me name of top 10 PC games of all time with description")

if st.button("Ask"):
    # System message pins the persona; the user's text rides as the human turn.
    messages = [
        SystemMessage(content="Answer like you are a hardcore PC gamer"),
        HumanMessage(content=question)
    ]
    response = chat_model.invoke(messages)
    st.write("### Response:")
    st.write(response.content)
 
14
  # result = model_d.invoke(message)
15
  # print(result.content)
16
 
17
import os
import streamlit as st
from langchain_community.chat_models import ChatHuggingFace
from langchain_community.llms import HuggingFaceHub
from langchain_core.llms import HuggingFaceHub  # noqa: F811 -- remove if duplicate; kept path below is the one used
from langchain_core.messages import HumanMessage, SystemMessage
from fpdf import FPDF

# Set HuggingFace token from the environment.
# BUG FIX: the previous code did os.getenv("Ayush"), which looks up an
# environment variable literally named "Ayush"; when unset it returns None,
# and assigning None into os.environ raises TypeError at startup.
_hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if _hf_token:
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = _hf_token
26
+
27
# Per-topic mentor persona (system-prompt seed) and the HF model repo that
# answers questions on that topic.
_TOPIC_SETTINGS = [
    ("Python", "Answer like a senior Python developer and coding mentor.", "meta-llama/Llama-3.2-3B-Instruct"),
    ("SQL", "Answer like a senior SQL engineer with industry experience.", "google/gemma-3-27b-it"),
    ("Power BI", "Answer like a Power BI expert helping a beginner.", "mistralai/Mistral-7B-Instruct-v0.1"),
    ("Statistics", "Answer like a statistics professor explaining key concepts to a student.", "deepseek-ai/DeepSeek-R1"),
    ("Machine Learning", "Answer like an ML mentor guiding a junior data scientist.", "google/gemma-3-27b-it"),
    ("Deep Learning", "Answer like a deep learning researcher with real-world insights.", "meta-llama/Llama-3.2-3B-Instruct"),
    ("Generative AI", "Answer like an expert in LLMs and Generative AI research.", "deepseek-ai/DeepSeek-R1"),
]
topic_config = {
    name: {"prompt": prompt, "model": model}
    for name, prompt, model in _TOPIC_SETTINGS
}

# How the answer's depth is adjusted for the learner's experience level.
experience_prompts = dict(
    Beginner="Explain with simple language and clear examples for a beginner.",
    Intermediate="Provide a detailed answer suitable for an intermediate learner.",
    Expert="Give an in-depth and advanced explanation suitable for an expert.",
)
65
+
66
# Streamlit page chrome plus the persistent per-session transcript.
st.set_page_config(page_title="Data Science Mentor", page_icon="πŸ“˜")
st.title("πŸ“˜ Data Science Mentor App")

# Lazily create the transcript list on first page load.
if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []

# Input widgets: topic set, mentor seniority, and the question itself.
selected_topics = st.multiselect(
    "Select one or more topics:",
    list(topic_config.keys()),
    default=["Python"],
)
experience_level = st.selectbox(
    "Select mentor experience level:",
    list(experience_prompts.keys()),
)
question = st.text_area("Ask your question here:")
80
+
81
if st.button("Get Answer"):
    if not selected_topics:
        st.warning("Please select at least one topic.")
    elif not question.strip():
        st.warning("Please enter your question.")
    else:
        # Combine the persona prompts of every selected topic, then append
        # the experience-level instruction (space-separated, same shape as
        # the original string concatenation).
        prompt_parts = [topic_config[t]["prompt"] for t in selected_topics]
        prompt_parts.append(experience_prompts[experience_level])
        combined_prompt = " ".join(prompt_parts)

        # BUG FIX: the previous code collected models into a set and took
        # list(models_used)[0] -- set iteration order over strings is not
        # stable across interpreter runs (hash randomization), so the
        # answering model could change arbitrarily. Deterministically use
        # the model of the first topic the user selected.
        chosen_model = topic_config[selected_topics[0]]["model"]

        # Build the chat model for the chosen repo.
        llm = HuggingFaceHub(
            repo_id=chosen_model,
            model_kwargs={"temperature": 0.6, "max_new_tokens": 150},
        )
        chat_model = ChatHuggingFace(llm=llm)

        messages = [
            SystemMessage(content=combined_prompt),
            HumanMessage(content=question),
        ]

        with st.spinner("Mentor is typing..."):
            response = chat_model.invoke(messages)

        st.markdown("### 🧠 Mentor's Response:")
        st.markdown(response.content)

        # Persist the exchange for the history view and PDF export.
        st.session_state.chat_history.append(
            (selected_topics, experience_level, question, response.content)
        )
120
+
121
# Render the running transcript, oldest first, one divider per exchange.
if st.session_state.chat_history:
    st.markdown("---")
    st.subheader("πŸ“ Chat History")
    for idx, (topics, exp, q, a) in enumerate(st.session_state.chat_history, start=1):
        st.markdown(f"**{idx}. Topics:** {', '.join(topics)} | **Mentor Level:** {exp}")
        st.markdown(f"**You:** {q}")
        st.markdown(f"**Mentor:** {a}")
        st.markdown("---")
130
+
131
def _pdf_safe(text: str) -> str:
    """Coerce *text* to Latin-1, replacing unsupported characters.

    BUG FIX: FPDF's built-in "Arial" core font only covers Latin-1; mentor
    answers routinely contain emoji/Unicode, which previously made
    pdf.output() raise UnicodeEncodeError.
    """
    return text.encode("latin-1", "replace").decode("latin-1")


# Download PDF
if st.button("πŸ“„ Download PDF of this chat"):
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", size=12)

    pdf.cell(200, 10, txt="Data Science Mentor Chat History", ln=True, align="C")
    pdf.ln(10)

    for i, (topics, exp, q, a) in enumerate(st.session_state.chat_history, 1):
        pdf.multi_cell(
            0,
            10,
            _pdf_safe(f"{i}. Topics: {', '.join(topics)} | Mentor Level: {exp}\nYou: {q}\nMentor: {a}\n\n"),
        )

    # PORTABILITY FIX: hard-coded "/tmp" does not exist on Windows; use the
    # platform's temp directory instead.
    import tempfile
    pdf_path = os.path.join(tempfile.gettempdir(), "mentor_chat.pdf")
    pdf.output(pdf_path)

    with open(pdf_path, "rb") as f:
        st.download_button("πŸ“₯ Click to Download PDF", f, file_name="mentor_chat.pdf", mime="application/pdf")
148