Update knowledge_engine.py

knowledge_engine.py  CHANGED  (+387 −133)
[The removed side of this diff (hunks @@ -1,12 +1,15 @@, @@ -17,120 +20,335 @@, @@ -138,71 +356,107 @@) did not survive extraction. The recoverable fragments show that the previous KnowledgeManager persisted its indexes to disk rather than to a temp directory -- saving the FAISS index with vector_db.save_local(str(Config.VECTOR_STORE_PATH)) and pickling the BM25 retriever to Config.BM25_STORE_PATH with bm25_retriever.k = Config.MAX_CONTEXT_CHUNKS -- and saved uploads via f.write(uploaded_file.getbuffer()). The remaining removed lines are unrecoverable.]

New version of knowledge_engine.py (diff markers and line numbers stripped):
import os
import pickle
import tempfile
import shutil
from typing import Dict, Any, List, Optional
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
import io

from config import Config
from langchain_core.documents import Document
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA
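# The diff skips unchanged lines 16-19 here. Judging from the names used below and
# the hunk header's context line ("@@ ... from langchain_huggingface import
# HuggingFaceEndpoint"), they presumably hold the remaining imports -- a hedged
# guess; only the HuggingFaceEndpoint import is confirmed by the hunk header:
#
#     from langchain.prompts import PromptTemplate
#     from langchain_community.retrievers import BM25Retriever
#     from langchain_huggingface import HuggingFaceEmbeddings, HuggingFaceEndpoint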

class KnowledgeManager:
    def __init__(self):
        self.temp_dir = tempfile.mkdtemp()  # Use temp directory for HF Spaces
        self.setup_temp_dirs()
        self.embeddings = self._init_embeddings()
        self.vector_db = None
        self.bm25_retriever = None
        self.qa_chain = None
        self.llm = None
        self.knowledge_texts = []  # Store texts in memory

        # Initialize with default knowledge
        self._create_default_knowledge()
        self._init_system()

    def setup_temp_dirs(self):
        """Set up temporary directories for HF Spaces compatibility"""
        self.knowledge_dir = os.path.join(self.temp_dir, "knowledge")
        self.vector_store_path = os.path.join(self.temp_dir, "vector_store")
        self.bm25_store_path = os.path.join(self.temp_dir, "bm25_store.pkl")

        os.makedirs(self.knowledge_dir, exist_ok=True)
        os.makedirs(self.vector_store_path, exist_ok=True)
    def _init_embeddings(self):
        """Initialize embeddings with error handling"""
        try:
            print("[i] Initializing Hugging Face embeddings...")
            return HuggingFaceEmbeddings(
                model_name="sentence-transformers/all-mpnet-base-v2",
                model_kwargs={'device': 'cpu'},
                encode_kwargs={'normalize_embeddings': True}
            )
        except Exception as e:
            print(f"[!] Error initializing embeddings: {e}")
            # Fallback to a smaller model
            try:
                return HuggingFaceEmbeddings(
                    model_name="sentence-transformers/all-MiniLM-L6-v2",
                    model_kwargs={'device': 'cpu'},
                    encode_kwargs={'normalize_embeddings': True}
                )
            except Exception as e2:
                print(f"[!] Fallback embeddings also failed: {e2}")
                return None
    def _init_llm(self):
        """Initialize LLM with proper error handling and fallbacks"""
        if self.llm is not None:
            return self.llm

        hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN") or os.getenv("HF_TOKEN")

        if not hf_token:
            print("[!] No Hugging Face API token found. Set HUGGINGFACEHUB_API_TOKEN or HF_TOKEN environment variable.")
            return None

        try:
            print("[i] Initializing HuggingFace LLM...")
            self.llm = HuggingFaceEndpoint(
                repo_id="mistralai/Mistral-7B-Instruct-v0.1",
                temperature=0.1,
                max_new_tokens=512,
                huggingfacehub_api_token=hf_token,
                timeout=60  # Add timeout
            )

            # Test the LLM with a simple query
            test_response = self.llm.invoke("Hello")
            print("[i] LLM initialized successfully")
            return self.llm

        except Exception as e:
            print(f"[!] Error with Mistral model: {e}")
            # Try alternative models
            fallback_models = [
                "microsoft/DialoGPT-medium",
                "google/flan-t5-base",
                "huggingface/CodeBERTa-small-v1"
            ]

            for model in fallback_models:
                try:
                    print(f"[i] Trying fallback model: {model}")
                    self.llm = HuggingFaceEndpoint(
                        repo_id=model,
                        temperature=0.1,
                        max_new_tokens=256,
                        huggingfacehub_api_token=hf_token,
                        timeout=30
                    )
                    test_response = self.llm.invoke("Hello")
                    print(f"[i] Successfully initialized with {model}")
                    return self.llm
                except Exception as e2:
                    print(f"[!] {model} also failed: {e2}")
                    continue

            print("[!] All LLM models failed. Using mock responses.")
            return None

    def _init_system(self):
        """Initialize the retrieval system"""
        try:
            self.vector_db, self.bm25_retriever = self._build_retrievers_from_texts()
            self.qa_chain = self._create_qa_chain()
        except Exception as e:
            print(f"[!] Error initializing system: {e}")
    def _create_default_knowledge(self):
        """Create default knowledge base"""
        default_texts = [
            {
                "filename": "sirraya_xbrain.txt",
                "content": """Sirraya xBrain - Advanced AI Platform

Created by Amir Hameed.

Sirraya xBrain is an intelligent AI platform that combines multiple retrieval methods for enhanced question answering capabilities.

Key Features:
- Hybrid Retrieval System: Combines Vector Search (FAISS) with BM25 keyword search
- LISA Assistant: An AI assistant powered by language models
- Document Processing: Automatic text chunking and embedding generation
- Multi-Modal Retrieval: Both semantic and keyword-based search
- Real-time Query Processing: Fast response times with parallel retrieval

Technical Components:
- FAISS (Facebook AI Similarity Search) for vector-based semantic search
- BM25 (Best Matching 25) for traditional keyword-based information retrieval
- HuggingFace Transformers for language model integration
- LangChain for building the question-answering pipeline

The platform is designed to provide accurate and contextually relevant answers by leveraging both semantic understanding and keyword matching techniques."""
            },
            {
                "filename": "technical_details.txt",
                "content": """Technical Architecture of Sirraya xBrain

Vector Database:
- Uses FAISS for efficient similarity search
- Embeddings generated using sentence-transformers/all-mpnet-base-v2
- Cosine similarity for measuring document relevance
- Configurable similarity thresholds

BM25 Retriever:
- Traditional keyword-based search algorithm
- Complements vector search for better recall
- Effective for exact keyword matches

Text Processing:
- Recursive character text splitter for document chunking
- Configurable chunk size and overlap
- Supports multiple text formats

Query Processing Pipeline:
1. Parallel retrieval from both vector and BM25 systems
2. Document scoring and ranking
3. Context preparation for language model
4. Answer generation using prompt templates
5. Source document citation

Performance Optimizations:
- ThreadPoolExecutor for parallel processing
- Configurable retrieval parameters
- Fallback mechanisms for failed retrievals"""
            }
        ]

        self.knowledge_texts = default_texts

        # Also save to temp files for compatibility
        for text_data in default_texts:
            filepath = os.path.join(self.knowledge_dir, text_data["filename"])
            with open(filepath, "w", encoding="utf-8") as f:
                f.write(text_data["content"])
    def _build_retrievers_from_texts(self):
        """Build retrievers from in-memory texts"""
        if not self.embeddings:
            print("[!] No embeddings available")
            return None, None

        try:
            # Create documents from stored texts
            documents = []
            for text_data in self.knowledge_texts:
                doc = Document(
                    page_content=text_data["content"],
                    metadata={"source": text_data["filename"]}
                )
                documents.append(doc)

            # Split documents into chunks
            splitter = RecursiveCharacterTextSplitter(
                chunk_size=getattr(Config, 'CHUNK_SIZE', 1000),
                chunk_overlap=getattr(Config, 'CHUNK_OVERLAP', 200),
                separators=["\n\n", "\n", ". ", "! ", "? ", "; ", " ", ""]
            )
            chunks = splitter.split_documents(documents)

            if not chunks:
                print("[!] No chunks created")
                return None, None

            print(f"[i] Created {len(chunks)} chunks")

            # Create vector database
            vector_db = FAISS.from_documents(
                chunks,
                self.embeddings,
                distance_strategy="COSINE"
            )

            # Create BM25 retriever
            bm25_retriever = BM25Retriever.from_documents(chunks)
            bm25_retriever.k = getattr(Config, 'MAX_CONTEXT_CHUNKS', 5)

            print("[i] Successfully created retrievers")
            return vector_db, bm25_retriever

        except Exception as e:
            print(f"[!] Error building retrievers: {e}")
            return None, None
    def add_text_content(self, filename: str, content: str) -> bool:
        """Add text content to knowledge base"""
        try:
            # Add to in-memory storage
            self.knowledge_texts.append({
                "filename": filename,
                "content": content
            })

            # Save to temp file
            filepath = os.path.join(self.knowledge_dir, filename)
            with open(filepath, "w", encoding="utf-8") as f:
                f.write(content)

            # Rebuild retrievers
            self.vector_db, self.bm25_retriever = self._build_retrievers_from_texts()
            self.qa_chain = self._create_qa_chain()

            print(f"[i] Added {filename} to knowledge base")
            return True

        except Exception as e:
            print(f"[!] Error adding text content: {e}")
            return False

    def add_uploaded_file(self, file_content: bytes, filename: str) -> bool:
        """Add uploaded file content to knowledge base"""
        try:
            # Decode file content
            content = file_content.decode('utf-8')
            return self.add_text_content(filename, content)
        except UnicodeDecodeError:
            print(f"[!] Could not decode {filename} as UTF-8")
            return False
        except Exception as e:
            print(f"[!] Error processing uploaded file: {e}")
            return False
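    # Editor's note: each call to add_text_content() rebuilds the FAISS index and
    # BM25 retriever from scratch, re-embedding the whole corpus. That is fine for
    # a handful of files, but worth batching if uploads grow.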
    def _parallel_retrieve(self, question: str) -> List[Document]:
        """Retrieve documents using both vector and BM25 search"""
        if not self.vector_db or not self.bm25_retriever:
            return []

        def retrieve_with_bm25():
            try:
                return self.bm25_retriever.invoke(question)
            except Exception as e:
                print(f"[!] BM25 retrieval error: {e}")
                return []

        def retrieve_with_vector():
            try:
                retriever = self.vector_db.as_retriever(
                    search_type="similarity_score_threshold",
                    search_kwargs={
                        "k": getattr(Config, 'MAX_CONTEXT_CHUNKS', 5),
                        "score_threshold": 0.3
                    }
                )
                return retriever.invoke(question)
            except Exception as e:
                print(f"[!] Vector retrieval error: {e}")
                # Fallback to simple similarity search
                try:
                    docs = self.vector_db.similarity_search(question, k=3)
                    return docs
                except Exception as e2:
                    print(f"[!] Fallback vector search also failed: {e2}")
                    return []

        try:
            with ThreadPoolExecutor(max_workers=2) as executor:
                bm25_future = executor.submit(retrieve_with_bm25)
                vector_future = executor.submit(retrieve_with_vector)
                bm25_results = bm25_future.result()
                vector_results = vector_future.result()

            # Combine and deduplicate results
            all_docs = vector_results + bm25_results
            seen_content = set()
            unique_docs = []

            for doc in all_docs:
                content_hash = hash(doc.page_content)
                if content_hash not in seen_content:
                    seen_content.add(content_hash)
                    unique_docs.append(doc)

            return unique_docs[:getattr(Config, 'MAX_CONTEXT_CHUNKS', 5)]

        except Exception as e:
            print(f"[!] Parallel retrieval error: {e}")
            return []
    def _create_qa_chain(self):
        """Create the QA chain"""
        if not self.vector_db or not self.bm25_retriever:
            return None

        llm = self._init_llm()
        if not llm:
            return None

        prompt_template = """You are LISA, an AI assistant for the Sirraya xBrain platform created by Amir Hameed.

Use the following context to answer the question accurately and helpfully:

Context:
{context}

Question: {question}

Instructions:
- Provide accurate answers based on the context
- If the information is not in the context, say "I don't have that information in my knowledge base"
- Be concise but comprehensive
- Cite relevant sources when possible

Answer:"""

        try:
            return RetrievalQA.from_chain_type(
                llm=llm,
                chain_type="stuff",
                retriever=self.vector_db.as_retriever(
                    search_kwargs={"k": getattr(Config, 'MAX_CONTEXT_CHUNKS', 5)}
                ),
                chain_type_kwargs={
                    "prompt": PromptTemplate(
                        template=prompt_template,
                        input_variables=["context", "question"]
                    )
                },
                return_source_documents=True
            )
        except Exception as e:
            print(f"[!] Error creating QA chain: {e}")
            return None
    def query(self, question: str) -> Dict[str, Any]:
        """Process a query and return results"""
        start_time = datetime.now()

        # Fallback for when LLM is not available
        if not self.qa_chain:
            docs = self._parallel_retrieve(question)
            if docs:
                # Simple fallback response using retrieved context
                context = "\n\n".join([doc.page_content for doc in docs[:2]])
                answer = f"Based on the available information: {context[:500]}..."
            else:
                answer = "I don't have information about that topic in my knowledge base."

            return {
                "answer": answer,
                "processing_time": (datetime.now() - start_time).total_seconds() * 1000,
                "source_chunks": docs[:3] if docs else []
            }

        try:
            # Use the full QA chain
            docs = self._parallel_retrieve(question)

            if not docs:
                return {
                    "answer": "I couldn't find relevant information in my knowledge base for your question.",
                    "processing_time": (datetime.now() - start_time).total_seconds() * 1000,
                    "source_chunks": []
                }

            result = self.qa_chain.invoke({
                "query": question,
                "input_documents": docs
            })

            return {
                "answer": result.get("result", "No answer could be generated"),
                "processing_time": (datetime.now() - start_time).total_seconds() * 1000,
                "source_chunks": result.get("source_documents", [])[:3]
            }

        except Exception as e:
            print(f"[!] Query error: {str(e)}")
            # Fallback to simple context-based response
            docs = self._parallel_retrieve(question)
            if docs:
                context = docs[0].page_content[:300] + "..."
                answer = f"Based on available information: {context}"
            else:
                answer = "I encountered an error processing your query. Please try rephrasing your question."

            return {
                "answer": answer,
                "processing_time": (datetime.now() - start_time).total_seconds() * 1000,
                "source_chunks": docs[:2] if docs else []
            }
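    # Editor's note: RetrievalQA runs its own retriever (the plain vector retriever
    # configured in _create_qa_chain), so the "input_documents" passed in query()
    # appear to be ignored by the chain; the hybrid _parallel_retrieve() results
    # only feed the empty-result guard and the fallback answers. A sketch of one
    # way to make the hybrid docs count follows the class.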
    def get_knowledge_files_count(self) -> int:
        """Get count of knowledge files"""
        return len(self.knowledge_texts)

    def get_knowledge_summary(self) -> str:
        """Get summary of knowledge base"""
        total_files = len(self.knowledge_texts)
        total_chars = sum(len(text["content"]) for text in self.knowledge_texts)
        return f"Knowledge Base: {total_files} files, ~{total_chars:,} characters"
    def cleanup(self):
        """Clean up temporary files"""
        try:
            shutil.rmtree(self.temp_dir)
        except Exception as e:
            print(f"[!] Cleanup error: {e}")

    def __del__(self):
        """Destructor to clean up resources"""
        self.cleanup()
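
A minimal usage sketch (not part of the commit; the flow is assumed from the class's public methods):

# Hypothetical usage of KnowledgeManager -- a sketch, not shipped code.
if __name__ == "__main__":
    km = KnowledgeManager()
    print(km.get_knowledge_summary())
    km.add_text_content("notes.txt", "Sirraya xBrain pairs FAISS with BM25.")
    result = km.query("What retrieval methods does Sirraya xBrain combine?")
    print(result["answer"])
    print(f"{result['processing_time']:.0f} ms, {len(result['source_chunks'])} source chunks")
    km.cleanup()

And, picking up the note in query(): if the hybrid _parallel_retrieve() results should actually reach the LLM, one option is to run the stuff chain directly over those documents instead of letting RetrievalQA re-retrieve. A hedged sketch, assuming the same PromptTemplate as _create_qa_chain and the (older but available) load_qa_chain helper:

# Sketch: answer from pre-retrieved docs, bypassing RetrievalQA's own retriever.
from langchain.chains.question_answering import load_qa_chain

def answer_from_docs(llm, prompt, docs, question):
    chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt)
    out = chain.invoke({"input_documents": docs, "question": question})
    return out["output_text"]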