"""Chatbot backend: answers user queries over the meal-plan vectorstore using a LangChain RetrievalQA (RAG) chain."""

from langchain_community.llms import HuggingFaceHub
from langchain.chains import RetrievalQA
from knowledge_base import load_vectorstore

from dotenv import load_dotenv  # πŸ‘ˆ load environment variables
import os

# === Load secret token from environment ===
load_dotenv()
hf_token = os.getenv("HF_EAT2FIT_API_TOKEN")
if not hf_token:
    raise EnvironmentError("HF_EAT2FIT_API_TOKEN is not set. Add it to your .env file.")

# === Load vectorstore with meal plan embeddings ===
db = load_vectorstore()
retriever = db.as_retriever()

# === Load LLM (HuggingFace Inference API) ===
llm = HuggingFaceHub(
    repo_id="mistralai/Mistral-7B-Instruct-v0.1",
    huggingfacehub_api_token=hf_token,  # πŸ‘ˆ pass token here
    model_kwargs={"temperature": 0.3, "max_new_tokens": 512}
)

# === Build Retrieval QA Chain ===
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=retriever,
    chain_type="stuff"
)

def get_bot_response(query):
    """
    Accepts a user query, runs through RAG chain, and returns a response.
    """
    if not query or not query.strip():
        return "Please ask something."

    try:
        result = qa_chain.invoke({"query": query})
        if not result or not result.get("result"):
            return "πŸ€– Sorry, I couldn't find anything relevant in the meal plans."
        return result["result"]
    except Exception as e:
        return f"❌ Failed to process your query: {e}"
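

if __name__ == "__main__":
    # Minimal command-line smoke test (illustrative sketch only, not part of the
    # app's entry point). Assumes the vectorstore has been built and the HF token
    # is available in the environment. The sample question is a hypothetical example.
    import sys

    question = " ".join(sys.argv[1:]) or "Suggest a high-protein breakfast from the meal plans."
    print(get_bot_response(question))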