Update app.py
app.py CHANGED
@@ -149,16 +149,14 @@

 
 import streamlit as st
-import os
 from langchain_community.chat_models import ChatHuggingFace
 from langchain_community.llms import HuggingFaceHub
 from langchain_core.messages import HumanMessage, SystemMessage
 
-#
-HF_TOKEN = st.secrets["
-os.environ["HUGGINGFACEHUB_API_TOKEN"] = HF_TOKEN  # Optional but okay to set
+# ✅ Load your secret token from Hugging Face Space secrets
+HF_TOKEN = st.secrets["Final_key"]
 
-#
+# ✅ Initialize the LLM with your token
 llm = HuggingFaceHub(
     repo_id="meta-llama/Llama-3.2-3B-Instruct",
     huggingfacehub_api_token=HF_TOKEN,
@@ -167,20 +165,21 @@ llm = HuggingFaceHub(
 
 chat_model = ChatHuggingFace(llm=llm)
 
-# Streamlit UI
-st.title("
+# ✅ Streamlit UI
+st.title("🧠 LLaMA Chat (Data Science Mentor)")
 
-question = st.text_input("Ask
+question = st.text_input("Ask any question about data science topics:")
 
 if st.button("Ask"):
     messages = [
-        SystemMessage(content="
+        SystemMessage(content="You are a data science mentor."),
         HumanMessage(content=question)
     ]
     response = chat_model.invoke(messages)
-    st.write("### Response:")
+    st.write("### Mentor's Response:")
     st.write(response.content)
 
+
 # import streamlit as st
 # from langchain_community.chat_models import ChatHuggingFace
 # from langchain_community.llms import HuggingFaceHub
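The updated code expects a secret named Final_key to be readable through st.secrets; on the Space itself that token is added in the Space settings rather than committed to the repo, as the new comment in the diff notes. For local testing, Streamlit populates st.secrets from a .streamlit/secrets.toml file. A minimal sketch of that file (the key name comes from the diff above, the token value is a placeholder):

    # .streamlit/secrets.toml  (keep this file out of version control)
    # The key name must match what app.py reads: st.secrets["Final_key"]
    Final_key = "hf_xxxxxxxxxxxxxxxxxxxx"

With the secret in place, running "streamlit run app.py" starts the same UI locally.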
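Separately, HuggingFaceHub and the ChatHuggingFace class in langchain_community are deprecated in recent LangChain releases in favor of the langchain-huggingface partner package. Not part of this commit, but a rough equivalent using that package would look like the sketch below (assumes langchain-huggingface is installed; repo_id and the HF_TOKEN loaded from st.secrets are unchanged from the diff):

    # Sketch only: newer imports replacing the deprecated langchain_community ones
    from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

    llm = HuggingFaceEndpoint(
        repo_id="meta-llama/Llama-3.2-3B-Instruct",
        huggingfacehub_api_token=HF_TOKEN,  # same token read from st.secrets
        task="text-generation",
    )
    chat_model = ChatHuggingFace(llm=llm)  # chat_model.invoke(messages) works as before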