Update app.py
app.py CHANGED
@@ -129,7 +129,7 @@ class BasicAgent:
     def _call_llm(self, prompt: str, max_tokens: int = 256) -> str:
         try:
             resp = self.llm.chat.completions.create(
-                model="gpt-
+                model="gpt-4o-mini",
                 messages=[
                     {"role": "system", "content": "You are a careful reasoning assistant."},
                     {"role": "user", "content": prompt},
@@ -154,18 +154,9 @@ class BasicAgent:
         state["current_step"] = "sheet"
         return state
 
-        # Check if question is about the file we just processed
-        if state["history"]:
-            last_step = state["history"][-1]["step"]
-            if last_step in ["video", "image", "sheet"]:
-                state["current_step"] = "answer"
-                return state
-
         # Regular text question analysis
         prompt = (
-            "Decide if this question needs web search. "
-            "For questions about files we just processed or simple reasoning, don't use search. "
-            "Respond with a Python dict:\n"
+            "Decide if this question needs web search. Respond with a Python dict:\n"
             "{\n 'needs_search': bool,\n 'search_query': str\n}\n\n"
            f"Question: {state['question']}"
         )
@@ -176,7 +167,7 @@ class BasicAgent:
             state["search_query"] = decision.get("search_query", state["question"])
         except Exception as e:
             print(f"\nLLM Error in question analysis: {str(e)}")
-            state["needs_search"] =
+            state["needs_search"] = True
             state["search_query"] = state["question"]
 
         state["current_step"] = "search" if state["needs_search"] else "answer"
@@ -286,8 +277,10 @@ class BasicAgent:
         # Join all materials with clear separation
         search_block = "\n\n".join(materials) if materials else "No materials available."
 
+        # First attempt with full context
         prompt = f"""
-
+You are a helpful assistant. Your task is to answer the question using ONLY the materials provided.
+If you cannot find a direct answer, provide the most relevant information you can find.
 
 QUESTION:
 {state['question']}
@@ -301,10 +294,40 @@ Write ANSWER: <answer> on its own line.
         raw = self._call_llm(prompt, 300)
         answer = raw.split("ANSWER:")[-1].strip()
 
-
-
-
-
+        # If first attempt fails or is empty, try a more direct prompt
+        if not answer or "ANSWER:" not in raw or any(k in answer.lower() for k in ["cannot", "sorry", "don't know"]):
+            print("\nFirst attempt failed, trying direct prompt...")
+            direct_prompt = f"""
+Answer this question directly and concisely. Use the materials provided.
+
+QUESTION:
+{state['question']}
+
+MATERIALS:
+{search_block}
+
+If you cannot find an exact answer, provide the most relevant information from the materials.
+Write ANSWER: <answer> on its own line.
+"""
+            raw = self._call_llm(direct_prompt, 300)
+            answer = raw.split("ANSWER:")[-1].strip()
+
+        # Final validation and fallback
+        if not answer or "ANSWER:" not in raw:
+            print("\nBoth attempts failed, using fallback answer...")
+            if materials:
+                # If we have materials but no answer, summarize what we know
+                summary_prompt = f"""
+Summarize the key information from these materials in one sentence:
+
+{search_block}
+
+Write ANSWER: <answer> on its own line.
+"""
+                raw = self._call_llm(summary_prompt, 150)
+                answer = raw.split("ANSWER:")[-1].strip()
+            else:
+                answer = "I cannot provide a definitive answer at this time."
 
         state["final_answer"] = answer
         state["current_step"] = "done"
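The analysis prompt above asks the model to reply with a Python dict, and the `except` branch now defaults `needs_search` to `True` when anything goes wrong. The parsing code itself is outside this diff; below is a minimal sketch of one defensive way it could work, assuming `ast.literal_eval` and a hypothetical `parse_decision` helper (neither is confirmed to be what app.py uses):

```python
import ast

# Hypothetical helper: parse the model's "Python dict" reply defensively.
# app.py's actual parsing code is not shown in this diff; this is one safe approach.
def parse_decision(raw: str, question: str) -> dict:
    try:
        decision = ast.literal_eval(raw.strip())
        if not isinstance(decision, dict):
            raise ValueError("reply was not a dict")
    except (ValueError, SyntaxError):
        # Mirror the commit's fallback: when the reply is unusable, search anyway.
        decision = {"needs_search": True, "search_query": question}
    return decision

print(parse_decision("{'needs_search': True, 'search_query': 'capital of France'}", "q"))
print(parse_decision("not a dict at all", "capital of France"))
```

`ast.literal_eval` only evaluates literals, so a malformed or malicious reply raises rather than executing code, which is why it is a safer choice here than `eval`.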
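For reference, here is a minimal standalone sketch of the retry-and-fallback answer flow this commit introduces. The function names and the stubbed `call_llm` (standing in for `BasicAgent._call_llm` and the real chat API) are illustrative assumptions, not part of app.py:

```python
# Sketch of the two-stage answer flow: full-context prompt, then a blunter
# retry, then a summary-or-apology fallback. `call_llm` is a stand-in stub.
def call_llm(prompt: str, max_tokens: int = 256) -> str:
    # Hypothetical stub: a real implementation would call the chat API here.
    return "Some reasoning...\nANSWER: 42"

def extract_answer(raw: str) -> str:
    # Same convention as app.py: take whatever follows the last "ANSWER:".
    return raw.split("ANSWER:")[-1].strip()

def answer_with_fallback(question: str, search_block: str) -> str:
    raw = call_llm(f"Use ONLY these materials.\nQUESTION: {question}\n"
                   f"MATERIALS: {search_block}\nWrite ANSWER: <answer> on its own line.")
    answer = extract_answer(raw)

    # Retry with a more direct prompt if the model hedged or skipped the marker.
    if not answer or "ANSWER:" not in raw or any(
            k in answer.lower() for k in ["cannot", "sorry", "don't know"]):
        raw = call_llm(f"Answer directly and concisely.\nQUESTION: {question}\n"
                       f"MATERIALS: {search_block}\nWrite ANSWER: <answer> on its own line.")
        answer = extract_answer(raw)

    # Last resort: summarize the materials, or admit defeat.
    if not answer or "ANSWER:" not in raw:
        if search_block and search_block != "No materials available.":
            raw = call_llm(f"Summarize the key information in one sentence:\n"
                           f"{search_block}\nWrite ANSWER: <answer> on its own line.", 150)
            answer = extract_answer(raw)
        else:
            answer = "I cannot provide a definitive answer at this time."
    return answer

if __name__ == "__main__":
    print(answer_with_fallback("What is 6 * 7?", "Six times seven is 42."))
```

Note that `raw.split("ANSWER:")[-1]` returns the whole string when the marker is absent, so `answer` is rarely empty on its own; that is why each guard also checks `"ANSWER:" not in raw`.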