import os

from dotenv import load_dotenv  # loads environment variables from a .env file
from langchain.chains import RetrievalQA
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceHub
from langchain_community.vectorstores import Chroma
from knowledge_base import load_vectorstore

# === Load secret token from environment ===
load_dotenv()
hf_token = os.getenv("HF_EAT2FIT_API_TOKEN")
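
# A minimal fail-fast guard (an addition, not in the original code): without it,
# a missing token only surfaces later as an opaque HTTP error from the Hub.
if not hf_token:
    raise RuntimeError("HF_EAT2FIT_API_TOKEN is not set; add it to your .env file.")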

# === Load vectorstore with meal plan embeddings ===
db = load_vectorstore()
retriever = db.as_retriever()

# === Load LLM (Hugging Face Inference API) ===
llm = HuggingFaceHub(
    repo_id="mistralai/Mistral-7B-Instruct-v0.1",
    huggingfacehub_api_token=hf_token,  # pass the token explicitly
    model_kwargs={"temperature": 0.3, "max_new_tokens": 512},
)
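
# NOTE: newer LangChain releases deprecate HuggingFaceHub in favor of
# HuggingFaceEndpoint from the langchain_huggingface package. A sketch of the
# equivalent call, assuming that package is installed:
#
#   from langchain_huggingface import HuggingFaceEndpoint
#   llm = HuggingFaceEndpoint(
#       repo_id="mistralai/Mistral-7B-Instruct-v0.1",
#       huggingfacehub_api_token=hf_token,
#       temperature=0.3,
#       max_new_tokens=512,
#   )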

# === Build Retrieval QA chain ===
# chain_type="stuff" inserts all retrieved documents into a single prompt,
# which suits short meal-plan chunks but can overflow the context window.
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=retriever,
    chain_type="stuff",
)


def get_bot_response(query):
    """
    Accepts a user query, runs it through the RAG chain, and returns a response.
    """
    if not query:
        return "Please ask something."
    try:
        result = qa_chain.invoke({"query": query})
        if not result or not result.get("result"):
            return "Sorry, I couldn't find anything relevant in the meal plans."
        return result["result"]
    except Exception as e:
        return f"Failed to process your query: {e}"