67Ayush87 committed on
Commit 470de69 · verified · 1 Parent(s): 83e453f

Update app.py

Files changed (1): app.py +6 -185
app.py CHANGED
@@ -1,165 +1,18 @@
- # import os
- # import langchain
- # import langchain_huggingface
- # from langchain_huggingface import HuggingFaceEndpoint,HuggingFacePipeline, ChatHuggingFace
- # from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
-
- # os.environ["HUGGINGFACEHUB_API_KEY"]=os.getenv('Ayush')
-
- # llama_model = HuggingFaceEndpoint(repo_id= "meta-llama/Llama-3.2-3B-Instruct",provider= "nebius",temperature=0.6, max_new_tokens=70,task="conversational")
- # model_d=ChatHuggingFace(llm =llama_model,repo_id= "meta-llama/Llama-3.2-3B-Instruct",provider= "nebius",temperature=0.6, max_new_tokens=70,task="conversational")
- # message = [SystemMessage(content = "Answer like you are a hardcore pc gamer"),
- #            HumanMessage(content = "Give me name of top 10 pc games of all time with description")]
- # result = model_d.invoke(message)
- # print(result.content)
-
- # import os
- # import streamlit as st
- # from langchain_community.chat_models import ChatHuggingFace
- # from langchain_community.llms import HuggingFaceHub
- # from langchain_core.messages import HumanMessage, SystemMessage
- # from fpdf import FPDF
-
- # # Set HuggingFace token from env or st.secrets
- # os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("keys")
- # os.environ["HF_TOKEN"]=os.getenv('Ayush')
-
-
- # # Topic-wise base prompts and models
- # topic_config = {
- #     "Python": {
- #         "prompt": "Answer like a senior Python developer and coding mentor.",
- #         "model": "meta-llama/Llama-3.2-3B-Instruct"
- #     },
- #     "SQL": {
- #         "prompt": "Answer like a senior SQL engineer with industry experience.",
- #         "model": "google/gemma-3-27b-it"
- #     },
- #     "Power BI": {
- #         "prompt": "Answer like a Power BI expert helping a beginner.",
- #         "model": "mistralai/Mistral-7B-Instruct-v0.1"
- #     },
- #     "Statistics": {
- #         "prompt": "Answer like a statistics professor explaining key concepts to a student.",
- #         "model": "deepseek-ai/DeepSeek-R1"
- #     },
- #     "Machine Learning": {
- #         "prompt": "Answer like an ML mentor guiding a junior data scientist.",
- #         "model": "google/gemma-3-27b-it"
- #     },
- #     "Deep Learning": {
- #         "prompt": "Answer like a deep learning researcher with real-world insights.",
- #         "model": "meta-llama/Llama-3.2-3B-Instruct"
- #     },
- #     "Generative AI": {
- #         "prompt": "Answer like an expert in LLMs and Generative AI research.",
- #         "model": "deepseek-ai/DeepSeek-R1"
- #     }
- # }
-
- # # Experience level adjustments to prompt
- # experience_prompts = {
- #     "Beginner": "Explain with simple language and clear examples for a beginner.",
- #     "Intermediate": "Provide a detailed answer suitable for an intermediate learner.",
- #     "Expert": "Give an in-depth and advanced explanation suitable for an expert."
- # }
-
- # # Streamlit app setup
- # st.set_page_config(page_title="Data Science Mentor", page_icon="📘")
- # st.title("📘 Data Science Mentor App")
-
- # if "chat_history" not in st.session_state:
- #     st.session_state.chat_history = []
-
- # # Multi-select topics
- # selected_topics = st.multiselect("Select one or more topics:", list(topic_config.keys()), default=["Python"])
-
- # # Select experience level
- # experience_level = st.selectbox("Select mentor experience level:", list(experience_prompts.keys()))
-
- # question = st.text_area("Ask your question here:")
-
- # if st.button("Get Answer"):
- #     if not selected_topics:
- #         st.warning("Please select at least one topic.")
- #     elif not question.strip():
- #         st.warning("Please enter your question.")
- #     else:
- #         # Combine prompts from selected topics + experience level
- #         combined_prompt = ""
- #         models_used = set()
- #         for topic in selected_topics:
- #             base_prompt = topic_config[topic]["prompt"]
- #             combined_prompt += f"{base_prompt} "
- #             models_used.add(topic_config[topic]["model"])
-
- #         combined_prompt += experience_prompts[experience_level]
-
- #         # Choose the first model from selected topics (or could do more advanced merging)
- #         chosen_model = list(models_used)[0]
-
- #         # Load model
- #         llm = HuggingFaceHub(
- #             repo_id=chosen_model,
- #             model_kwargs={"temperature": 0.6, "max_new_tokens": 150}
- #         )
- #         chat_model = ChatHuggingFace(llm=llm)
-
- #         messages = [
- #             SystemMessage(content=combined_prompt),
- #             HumanMessage(content=question)
- #         ]
-
- #         with st.spinner("Mentor is typing..."):
- #             response = chat_model.invoke(messages)
-
- #         st.markdown("### 🧠 Mentor's Response:")
- #         st.markdown(response.content)
-
- #         # Save chat
- #         st.session_state.chat_history.append((selected_topics, experience_level, question, response.content))
-
- # # Display chat history
- # if st.session_state.chat_history:
- #     st.markdown("---")
- #     st.subheader("📝 Chat History")
- #     for i, (topics, exp, q, a) in enumerate(st.session_state.chat_history, 1):
- #         st.markdown(f"**{i}. Topics:** {', '.join(topics)} | **Mentor Level:** {exp}")
- #         st.markdown(f"**You:** {q}")
- #         st.markdown(f"**Mentor:** {a}")
- #         st.markdown("---")
-
- # # Download PDF
- # if st.button("📄 Download PDF of this chat"):
- #     pdf = FPDF()
- #     pdf.add_page()
- #     pdf.set_font("Arial", size=12)
-
- #     pdf.cell(200, 10, txt="Data Science Mentor Chat History", ln=True, align="C")
- #     pdf.ln(10)
-
- #     for i, (topics, exp, q, a) in enumerate(st.session_state.chat_history, 1):
- #         pdf.multi_cell(0, 10, f"{i}. Topics: {', '.join(topics)} | Mentor Level: {exp}\nYou: {q}\nMentor: {a}\n\n")
-
- #     pdf_path = "/tmp/mentor_chat.pdf"
- #     pdf.output(pdf_path)
-
- #     with open(pdf_path, "rb") as f:
- #         st.download_button("📥 Click to Download PDF", f, file_name="mentor_chat.pdf", mime="application/pdf")
-
-
  import streamlit as st
+ import os
  from langchain_community.chat_models import ChatHuggingFace
  from langchain_community.llms import HuggingFaceHub
  from langchain_core.messages import HumanMessage, SystemMessage

  # ✅ Load your secret token from Hugging Face Space secrets
- HF_TOKEN = st.secrets["Final_key"]
+ hf_token = os.getenv("Data_science")  # Make sure "Data_science" is set in Space secrets
+ os.environ["HUGGINGFACEHUB_API_KEY"] = hf_token
+ os.environ["HF_TOKEN"] = hf_token

  # ✅ Initialize the LLM with your token
  llm = HuggingFaceHub(
      repo_id="meta-llama/Llama-3.2-3B-Instruct",
-     huggingfacehub_api_token=HF_TOKEN,
+     huggingfacehub_api_token=hf_token,  # <-- use hf_token here
      model_kwargs={"temperature": 0.6, "max_new_tokens": 100}
  )

@@ -170,7 +23,7 @@ st.title("🧠 LLaMA Chat (Data Science Mentor)")

  question = st.text_input("Ask any question about data science topics:")

- if st.button("Ask"):
+ if st.button("Ask") and question.strip():
      messages = [
          SystemMessage(content="You are a data science mentor."),
          HumanMessage(content=question)
@@ -178,35 +31,3 @@ if st.button("Ask"):
      response = chat_model.invoke(messages)
      st.write("### Mentor's Response:")
      st.write(response.content)
-
-
- # import streamlit as st
- # from langchain_community.chat_models import ChatHuggingFace
- # from langchain_community.llms import HuggingFaceHub
- # from langchain_core.messages import HumanMessage, SystemMessage
-
- # # Directly enter or securely load your Hugging Face API token
- # HF_TOKEN = "your_huggingface_token_here"  # 🔐 Replace with your token or use st.secrets
-
- # # Load model with token explicitly passed
- # llm = HuggingFaceHub(
- #     repo_id="meta-llama/Llama-3.2-3B-Instruct",
- #     huggingfacehub_api_token=HF_TOKEN,
- #     model_kwargs={"temperature": 0.6, "max_new_tokens": 100}
- # )
-
- # chat_model = ChatHuggingFace(llm=llm)
-
- # # Streamlit UI
- # st.title("🧪 Simple LLaMA Chat Test")
-
- # question = st.text_input("Ask a gaming-related question:", "Give me name of top 10 PC games of all time with description")
-
- # if st.button("Ask"):
- #     messages = [
- #         SystemMessage(content="Answer like you are a hardcore PC gamer"),
- #         HumanMessage(content=question)
- #     ]
- #     response = chat_model.invoke(messages)
- #     st.write("### Response:")
- #     st.write(response.content)
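Note: the new token wiring assumes the "Data_science" secret is always present. If it is missing, os.getenv("Data_science") returns None and the two os.environ[...] = hf_token assignments raise a TypeError before the app renders anything. A minimal defensive sketch (not part of this commit; the error message is illustrative):

import os
import streamlit as st

# Sketch only: fail fast when the Space secret is absent instead of
# crashing on the os.environ assignments with a None value.
hf_token = os.getenv("Data_science")
if not hf_token:
    st.error("Hugging Face token not found. Add a 'Data_science' secret in the Space settings.")
    st.stop()  # halts this Streamlit run cleanly

os.environ["HUGGINGFACEHUB_API_KEY"] = hf_token
os.environ["HF_TOKEN"] = hf_token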
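Note: HuggingFaceHub and the langchain_community version of ChatHuggingFace kept by this commit are deprecated in recent LangChain releases in favor of the langchain-huggingface package, which the commented-out experiment at the top of the old file already imported. A rough equivalent of the committed model setup under that API (a sketch, assuming the same model, sampling settings, and "Data_science" secret; untested against this Space):

import os
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langchain_core.messages import HumanMessage, SystemMessage

# Sketch only: mirrors the committed HuggingFaceHub setup with the
# maintained langchain-huggingface classes.
llm = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-3.2-3B-Instruct",
    huggingfacehub_api_token=os.getenv("Data_science"),
    temperature=0.6,
    max_new_tokens=100,
)
chat_model = ChatHuggingFace(llm=llm)

response = chat_model.invoke([
    SystemMessage(content="You are a data science mentor."),
    HumanMessage(content="What is overfitting?"),  # illustrative question
])
print(response.content)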