67Ayush87 committed on
Commit b45e827 · verified · 1 Parent(s): 641b126

Update app.py

Files changed (1)
app.py +191 -129
app.py CHANGED
@@ -13,137 +13,199 @@
  # result = model_d.invoke(message)
  # print(result.content)

- import os
+ # import os
+ # import streamlit as st
+ # from langchain_community.chat_models import ChatHuggingFace
+ # from langchain_community.llms import HuggingFaceHub
+ # from langchain_core.messages import HumanMessage, SystemMessage
+ # from fpdf import FPDF
+
+ # # Set HuggingFace token from env or st.secrets
+ # os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("keys")
+ # os.environ["HF_TOKEN"] = os.getenv('Ayush')
+
+
+ # # Topic-wise base prompts and models
+ # topic_config = {
+ #     "Python": {
+ #         "prompt": "Answer like a senior Python developer and coding mentor.",
+ #         "model": "meta-llama/Llama-3.2-3B-Instruct"
+ #     },
+ #     "SQL": {
+ #         "prompt": "Answer like a senior SQL engineer with industry experience.",
+ #         "model": "google/gemma-3-27b-it"
+ #     },
+ #     "Power BI": {
+ #         "prompt": "Answer like a Power BI expert helping a beginner.",
+ #         "model": "mistralai/Mistral-7B-Instruct-v0.1"
+ #     },
+ #     "Statistics": {
+ #         "prompt": "Answer like a statistics professor explaining key concepts to a student.",
+ #         "model": "deepseek-ai/DeepSeek-R1"
+ #     },
+ #     "Machine Learning": {
+ #         "prompt": "Answer like an ML mentor guiding a junior data scientist.",
+ #         "model": "google/gemma-3-27b-it"
+ #     },
+ #     "Deep Learning": {
+ #         "prompt": "Answer like a deep learning researcher with real-world insights.",
+ #         "model": "meta-llama/Llama-3.2-3B-Instruct"
+ #     },
+ #     "Generative AI": {
+ #         "prompt": "Answer like an expert in LLMs and Generative AI research.",
+ #         "model": "deepseek-ai/DeepSeek-R1"
+ #     }
+ # }
+
+ # # Experience level adjustments to prompt
+ # experience_prompts = {
+ #     "Beginner": "Explain with simple language and clear examples for a beginner.",
+ #     "Intermediate": "Provide a detailed answer suitable for an intermediate learner.",
+ #     "Expert": "Give an in-depth and advanced explanation suitable for an expert."
+ # }
+
+ # # Streamlit app setup
+ # st.set_page_config(page_title="Data Science Mentor", page_icon="📘")
+ # st.title("📘 Data Science Mentor App")
+
+ # if "chat_history" not in st.session_state:
+ #     st.session_state.chat_history = []
+
+ # # Multi-select topics
+ # selected_topics = st.multiselect("Select one or more topics:", list(topic_config.keys()), default=["Python"])
+
+ # # Select experience level
+ # experience_level = st.selectbox("Select mentor experience level:", list(experience_prompts.keys()))
+
+ # question = st.text_area("Ask your question here:")
+
+ # if st.button("Get Answer"):
+ #     if not selected_topics:
+ #         st.warning("Please select at least one topic.")
+ #     elif not question.strip():
+ #         st.warning("Please enter your question.")
+ #     else:
+ #         # Combine prompts from selected topics + experience level
+ #         combined_prompt = ""
+ #         models_used = set()
+ #         for topic in selected_topics:
+ #             base_prompt = topic_config[topic]["prompt"]
+ #             combined_prompt += f"{base_prompt} "
+ #             models_used.add(topic_config[topic]["model"])
+
+ #         combined_prompt += experience_prompts[experience_level]
+
+ #         # Choose the first model from selected topics (or could do more advanced merging)
+ #         chosen_model = list(models_used)[0]
+
+ #         # Load model
+ #         llm = HuggingFaceHub(
+ #             repo_id=chosen_model,
+ #             model_kwargs={"temperature": 0.6, "max_new_tokens": 150}
+ #         )
+ #         chat_model = ChatHuggingFace(llm=llm)
+
+ #         messages = [
+ #             SystemMessage(content=combined_prompt),
+ #             HumanMessage(content=question)
+ #         ]
+
+ #         with st.spinner("Mentor is typing..."):
+ #             response = chat_model.invoke(messages)
+
+ #         st.markdown("### 🧠 Mentor's Response:")
+ #         st.markdown(response.content)
+
+ #         # Save chat
+ #         st.session_state.chat_history.append((selected_topics, experience_level, question, response.content))
+
+ # # Display chat history
+ # if st.session_state.chat_history:
+ #     st.markdown("---")
+ #     st.subheader("📝 Chat History")
+ #     for i, (topics, exp, q, a) in enumerate(st.session_state.chat_history, 1):
+ #         st.markdown(f"**{i}. Topics:** {', '.join(topics)} | **Mentor Level:** {exp}")
+ #         st.markdown(f"**You:** {q}")
+ #         st.markdown(f"**Mentor:** {a}")
+ #     st.markdown("---")
+
+ # # Download PDF
+ # if st.button("📄 Download PDF of this chat"):
+ #     pdf = FPDF()
+ #     pdf.add_page()
+ #     pdf.set_font("Arial", size=12)
+
+ #     pdf.cell(200, 10, txt="Data Science Mentor Chat History", ln=True, align="C")
+ #     pdf.ln(10)
+
+ #     for i, (topics, exp, q, a) in enumerate(st.session_state.chat_history, 1):
+ #         pdf.multi_cell(0, 10, f"{i}. Topics: {', '.join(topics)} | Mentor Level: {exp}\nYou: {q}\nMentor: {a}\n\n")
+
+ #     pdf_path = "/tmp/mentor_chat.pdf"
+ #     pdf.output(pdf_path)
+
+ #     with open(pdf_path, "rb") as f:
+ #         st.download_button("📥 Click to Download PDF", f, file_name="mentor_chat.pdf", mime="application/pdf")
+
+
  import streamlit as st
  from langchain_community.chat_models import ChatHuggingFace
  from langchain_community.llms import HuggingFaceHub
  from langchain_core.messages import HumanMessage, SystemMessage
- from fpdf import FPDF
-
- # Set HuggingFace token from env or st.secrets
- os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("keys")
- os.environ["HF_TOKEN"] = os.getenv('Ayush')
-
-
- # Topic-wise base prompts and models
- topic_config = {
-     "Python": {
-         "prompt": "Answer like a senior Python developer and coding mentor.",
-         "model": "meta-llama/Llama-3.2-3B-Instruct"
-     },
-     "SQL": {
-         "prompt": "Answer like a senior SQL engineer with industry experience.",
-         "model": "google/gemma-3-27b-it"
-     },
-     "Power BI": {
-         "prompt": "Answer like a Power BI expert helping a beginner.",
-         "model": "mistralai/Mistral-7B-Instruct-v0.1"
-     },
-     "Statistics": {
-         "prompt": "Answer like a statistics professor explaining key concepts to a student.",
-         "model": "deepseek-ai/DeepSeek-R1"
-     },
-     "Machine Learning": {
-         "prompt": "Answer like an ML mentor guiding a junior data scientist.",
-         "model": "google/gemma-3-27b-it"
-     },
-     "Deep Learning": {
-         "prompt": "Answer like a deep learning researcher with real-world insights.",
-         "model": "meta-llama/Llama-3.2-3B-Instruct"
-     },
-     "Generative AI": {
-         "prompt": "Answer like an expert in LLMs and Generative AI research.",
-         "model": "deepseek-ai/DeepSeek-R1"
-     }
- }
-
- # Experience level adjustments to prompt
- experience_prompts = {
-     "Beginner": "Explain with simple language and clear examples for a beginner.",
-     "Intermediate": "Provide a detailed answer suitable for an intermediate learner.",
-     "Expert": "Give an in-depth and advanced explanation suitable for an expert."
- }
-
- # Streamlit app setup
- st.set_page_config(page_title="Data Science Mentor", page_icon="📘")
- st.title("📘 Data Science Mentor App")
-
- if "chat_history" not in st.session_state:
-     st.session_state.chat_history = []
-
- # Multi-select topics
- selected_topics = st.multiselect("Select one or more topics:", list(topic_config.keys()), default=["Python"])
-
- # Select experience level
- experience_level = st.selectbox("Select mentor experience level:", list(experience_prompts.keys()))
-
- question = st.text_area("Ask your question here:")
-
- if st.button("Get Answer"):
-     if not selected_topics:
-         st.warning("Please select at least one topic.")
-     elif not question.strip():
-         st.warning("Please enter your question.")
-     else:
-         # Combine prompts from selected topics + experience level
-         combined_prompt = ""
-         models_used = set()
-         for topic in selected_topics:
-             base_prompt = topic_config[topic]["prompt"]
-             combined_prompt += f"{base_prompt} "
-             models_used.add(topic_config[topic]["model"])
-
-         combined_prompt += experience_prompts[experience_level]
-
-         # Choose the first model from selected topics (or could do more advanced merging)
-         chosen_model = list(models_used)[0]
-
-         # Load model
-         llm = HuggingFaceHub(
-             repo_id=chosen_model,
-             model_kwargs={"temperature": 0.6, "max_new_tokens": 150}
-         )
-         chat_model = ChatHuggingFace(llm=llm)
-
-         messages = [
-             SystemMessage(content=combined_prompt),
-             HumanMessage(content=question)
-         ]
-
-         with st.spinner("Mentor is typing..."):
-             response = chat_model.invoke(messages)
-
-         st.markdown("### 🧠 Mentor's Response:")
-         st.markdown(response.content)
-
-         # Save chat
-         st.session_state.chat_history.append((selected_topics, experience_level, question, response.content))
-
- # Display chat history
- if st.session_state.chat_history:
-     st.markdown("---")
-     st.subheader("📝 Chat History")
-     for i, (topics, exp, q, a) in enumerate(st.session_state.chat_history, 1):
-         st.markdown(f"**{i}. Topics:** {', '.join(topics)} | **Mentor Level:** {exp}")
-         st.markdown(f"**You:** {q}")
-         st.markdown(f"**Mentor:** {a}")
-     st.markdown("---")
-
- # Download PDF
- if st.button("📄 Download PDF of this chat"):
-     pdf = FPDF()
-     pdf.add_page()
-     pdf.set_font("Arial", size=12)
-
-     pdf.cell(200, 10, txt="Data Science Mentor Chat History", ln=True, align="C")
-     pdf.ln(10)
-
-     for i, (topics, exp, q, a) in enumerate(st.session_state.chat_history, 1):
-         pdf.multi_cell(0, 10, f"{i}. Topics: {', '.join(topics)} | Mentor Level: {exp}\nYou: {q}\nMentor: {a}\n\n")
-
-     pdf_path = "/tmp/mentor_chat.pdf"
-     pdf.output(pdf_path)
-
-     with open(pdf_path, "rb") as f:
-         st.download_button("📥 Click to Download PDF", f, file_name="mentor_chat.pdf", mime="application/pdf")

+ # Directly enter or securely load your Hugging Face API token
+ HF_TOKEN = "your_huggingface_token_here"  # 🔐 Replace with your token or use st.secrets
+
+ # Load model with token explicitly passed
+ llm = HuggingFaceHub(
+     repo_id="meta-llama/Llama-3.2-3B-Instruct",
+     huggingfacehub_api_token=HF_TOKEN,
+     model_kwargs={"temperature": 0.6, "max_new_tokens": 100}
+ )
+
+ chat_model = ChatHuggingFace(llm=llm)
+
+ # Streamlit UI
+ st.title("🧪 Simple LLaMA Chat Test")
+
+ question = st.text_input("Ask a gaming-related question:", "Give me name of top 10 PC games of all time with description")
+
+ if st.button("Ask"):
+     messages = [
+         SystemMessage(content="Answer like you are a hardcore PC gamer"),
+         HumanMessage(content=question)
+     ]
+     response = chat_model.invoke(messages)
+     st.write("### Response:")
+     st.write(response.content)
+
+ import streamlit as st
+ from langchain_community.chat_models import ChatHuggingFace
+ from langchain_community.llms import HuggingFaceHub
+ from langchain_core.messages import HumanMessage, SystemMessage
+
+ # Directly enter or securely load your Hugging Face API token
+ HF_TOKEN = "your_huggingface_token_here"  # 🔐 Replace with your token or use st.secrets
+
+ # Load model with token explicitly passed
+ llm = HuggingFaceHub(
+     repo_id="meta-llama/Llama-3.2-3B-Instruct",
+     huggingfacehub_api_token=HF_TOKEN,
+     model_kwargs={"temperature": 0.6, "max_new_tokens": 100}
+ )
+
+ chat_model = ChatHuggingFace(llm=llm)
+
+ # Streamlit UI
+ st.title("🧪 Simple LLaMA Chat Test")
+
+ question = st.text_input("Ask a gaming-related question:", "Give me name of top 10 PC games of all time with description")
+
+ if st.button("Ask"):
+     messages = [
+         SystemMessage(content="Answer like you are a hardcore PC gamer"),
+         HumanMessage(content=question)
+     ]
+     response = chat_model.invoke(messages)
+     st.write("### Response:")
+     st.write(response.content)
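
Note on the token handling above: the new code ships HF_TOKEN as a placeholder string, and its own comment points to st.secrets as the safer route. A minimal sketch of that alternative, assuming a secret named HF_TOKEN has been configured for the Space (the secret name, like the earlier "keys" and "Ayush" environment variables, is deployment-specific and an assumption here, not something this commit defines):

import os

import streamlit as st
from langchain_community.chat_models import ChatHuggingFace
from langchain_community.llms import HuggingFaceHub

# Prefer Streamlit's secrets store, falling back to the environment.
# "HF_TOKEN" is an assumed secret name; match whatever the Space actually defines.
hf_token = st.secrets.get("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not hf_token:
    st.error("No Hugging Face token found. Add HF_TOKEN to the Space secrets.")
    st.stop()

llm = HuggingFaceHub(
    repo_id="meta-llama/Llama-3.2-3B-Instruct",
    huggingfacehub_api_token=hf_token,
    model_kwargs={"temperature": 0.6, "max_new_tokens": 100},
)
chat_model = ChatHuggingFace(llm=llm)

This keeps the token out of app.py itself, so the file can sit in a public repo without leaking credentials.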