DurgaDeepak committed
Commit 7a51850 · verified · 1 Parent(s): e0f4eae

Update chatbot_logic.py

Files changed (1)
  1. chatbot_logic.py +10 -4
chatbot_logic.py CHANGED
@@ -1,22 +1,28 @@
 from langchain_community.vectorstores import Chroma
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_community.llms import HuggingFaceHub
-from langchain_huggingface import HuggingFaceEndpoint
 from langchain.chains import RetrievalQA
 from knowledge_base import load_vectorstore
+
+from dotenv import load_dotenv  # 👈 load environment variables
 import os
 
-# Load vectorstore with meal plan embeddings
+# === Load secret token from environment ===
+load_dotenv()
+hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
+
+# === Load vectorstore with meal plan embeddings ===
 db = load_vectorstore()
 retriever = db.as_retriever()
 
-# Load LLM (HuggingFace Inference API)
+# === Load LLM (HuggingFace Inference API) ===
 llm = HuggingFaceHub(
     repo_id="mistralai/Mistral-7B-Instruct-v0.1",
+    huggingfacehub_api_token=hf_token,  # 👈 pass token here
     model_kwargs={"temperature": 0.3, "max_new_tokens": 512}
 )
 
-# Build Retrieval QA Chain
+# === Build Retrieval QA Chain ===
 qa_chain = RetrievalQA.from_chain_type(
     llm=llm,
     retriever=retriever,
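
For reference, the updated module could be exercised roughly as follows. This is a minimal sketch, not part of the commit: the .env contents, the token placeholder, the example question, and the smoke-test script are illustrative assumptions.

# .env — assumed to sit next to chatbot_logic.py and to stay out of version control
# HUGGINGFACEHUB_API_TOKEN=hf_your_token_here

# hypothetical smoke test
from chatbot_logic import qa_chain

# RetrievalQA accepts a plain query string via run(); the retriever pulls
# matching meal-plan chunks from the Chroma vectorstore before the
# Mistral-7B-Instruct endpoint generates the answer.
answer = qa_chain.run("What is planned for Monday's lunch?")
print(answer)

On newer LangChain releases the same call would typically be written as qa_chain.invoke({"query": ...}), which returns a dict containing the "result" key.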