naman1102 committed on
Commit b692f0c · 1 Parent(s): fa8a2b0

Update app.py

Files changed (1)
  1. app.py +50 -29

app.py CHANGED
@@ -137,16 +137,21 @@ class BasicAgent:
 
     # ---- Low‑level LLM call
     def _call_llm(self, prompt: str, max_tokens: int = 256) -> str:
-        resp = self.llm.chat.completions.create(
-            model="gpt-4o-mini",
-            messages=[
-                {"role": "system", "content": "You are a careful reasoning assistant."},
-                {"role": "user", "content": prompt},
-            ],
-            temperature=0.3,
-            max_tokens=max_tokens,
-        )
-        return resp.choices[0].message.content.strip()
+        try:
+            resp = self.llm.chat.completions.create(
+                model="gpt-4o-mini",
+                messages=[
+                    {"role": "system", "content": "You are a careful reasoning assistant."},
+                    {"role": "user", "content": prompt},
+                ],
+                temperature=0.3,
+                max_tokens=max_tokens,
+            )
+            return resp.choices[0].message.content.strip()
+        except Exception as e:
+            print(f"\nLLM Error: {str(e)}")
+            print(f"Prompt that caused error:\n{prompt}")
+            raise
 
     # ---- Workflow nodes
     def _analyze_question(self, state: AgentState) -> AgentState:
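The wrapper added here logs the failing prompt and then re-raises, so callers still see the exception. If transient API failures (rate limits, timeouts) are the motivation, a retry layer could sit on top of it; a minimal sketch under that assumption, using a hypothetical call_with_retry helper that is not part of this commit:

import time

def call_with_retry(agent, prompt: str, attempts: int = 3, backoff: float = 2.0) -> str:
    # Hypothetical helper, not in the commit: retry the agent's _call_llm
    # with exponential backoff, relying on the new wrapper logging and
    # re-raising rather than swallowing errors.
    for attempt in range(1, attempts + 1):
        try:
            return agent._call_llm(prompt)
        except Exception:
            if attempt == attempts:
                raise  # retries exhausted; propagate the already-logged error
            time.sleep(backoff ** attempt)  # waits 2s, 4s, 8s with the defaults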
@@ -184,7 +189,7 @@ class BasicAgent:
         except (json.JSONDecodeError, KeyError):
             pass  # Not a JSON question or no file_url
         except Exception as e:
-            state["logs"]["file_download_error"] = str(e)
+            print(f"\nFile handling error: {str(e)}")
         state["current_step"] = "answer"
         return state
 
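This hunk swaps a structured log entry for a print. Worth noting: the old one-liner raised a KeyError whenever state had no "logs" dict yet, which may be what prompted the change. A sketch of a hypothetical helper that keeps both behaviours, assuming state is dict-like:

def log_error(state: dict, key: str, err: Exception) -> None:
    # Hypothetical helper, not in the commit: print like the new code does,
    # but also keep the error in state["logs"]. setdefault avoids the
    # KeyError the old one-liner risked when "logs" was not initialised.
    print(f"\n{key}: {err}")
    state.setdefault("logs", {})[key] = str(err)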
 
@@ -195,18 +200,18 @@ class BasicAgent:
             "{\n 'needs_search': bool,\n 'search_query': str\n} \n\n"
             f"Question: {state['question']}"
         )
-        raw = self._call_llm(prompt)
         try:
+            raw = self._call_llm(prompt)
             decision = ast.literal_eval(raw)
             state["needs_search"] = bool(decision.get("needs_search", False))
             state["search_query"] = decision.get("search_query", state["question"])
-        except Exception:
+        except Exception as e:
+            print(f"\nLLM Error in question analysis: {str(e)}")
+            print(f"Raw response: {raw}")
             state["needs_search"] = True
             state["search_query"] = state["question"]
             decision = {"parse_error": raw}
-        state["logs"] = {
-            "analyze": {"prompt": prompt, "llm_response": raw, "decision": decision}
-        }
+
         state["current_step"] = "search" if state["needs_search"] else "answer"
         state["history"].append({"step": "analyze", "output": decision})
         return state
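Moving raw = self._call_llm(prompt) inside the try has a side effect: when the call itself raises, raw is never bound, so the except block's print(f"Raw response: {raw}") and the parse_error fallback hit a NameError instead of the intended recovery path. (ast.literal_eval is the right parser here, since the prompt asks for a Python-style dict literal with single quotes rather than JSON.) A standalone, purely illustrative sketch of the step with raw pre-bound:

import ast

def parse_decision(call_llm, prompt: str) -> dict:
    # Hypothetical standalone sketch of the analysis step. Pre-binding raw
    # keeps the except block safe: in the committed version raw is unbound
    # when the LLM call itself raises, so the print and the parse_error
    # dict would trigger a NameError instead of the intended fallback.
    raw = None
    try:
        raw = call_llm(prompt)
        return ast.literal_eval(raw)
    except Exception as e:
        print(f"\nLLM Error in question analysis: {e}")
        print(f"Raw response: {raw}")  # None when the call failed before assignment
        return {"parse_error": raw}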
@@ -366,19 +371,35 @@ MATERIALS:
 
 Think step-by-step. Write ANSWER: <answer> on its own line.
 """
-        raw = self._call_llm(prompt, 300)
-        answer = raw.split("ANSWER:")[-1].strip()
-
-        # Validate answer
-        if not answer:
-            answer = "I cannot provide a definitive answer at this time."
-        elif any(k in answer.lower() for k in ["i cannot find", "sorry"]):
-            # Fall back to a more general response
-            answer = "Based on the available information, I cannot provide a complete answer."
-
-        state["final_answer"] = answer
-        state["history"].append({"step": "answer", "output": raw})
-        state["current_step"] = "done"
+        try:
+            raw = self._call_llm(prompt, 300)
+            answer = raw.split("ANSWER:")[-1].strip()
+
+            # Validate answer
+            if not answer:
+                print("\nLLM Warning: Empty answer received")
+                print(f"Raw response: {raw}")
+                answer = "I cannot provide a definitive answer at this time."
+            elif any(k in answer.lower() for k in ["i cannot find", "sorry"]):
+                print("\nLLM Warning: LLM indicated it couldn't find an answer")
+                print(f"Raw response: {raw}")
+                answer = "Based on the available information, I cannot provide a complete answer."
+            elif "ANSWER:" not in raw:
+                print("\nLLM Warning: Response missing ANSWER: prefix")
+                print(f"Raw response: {raw}")
+                answer = "I cannot provide a definitive answer at this time."
+
+            state["final_answer"] = answer
+            state["history"].append({"step": "answer", "output": raw})
+            state["current_step"] = "done"
+
+        except Exception as e:
+            print(f"\nLLM Error in answer generation: {str(e)}")
+            print(f"Question: {state['question']}")
+            print(f"Materials:\n{search_block}")
+            state["final_answer"] = "I encountered an error while generating the answer."
+            state["current_step"] = "done"
+
         return state
 
     # ---- Build LangGraph workflow
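The validation chain in the last hunk works because raw.split("ANSWER:")[-1] returns the whole response when the marker is missing, so the elif "ANSWER:" not in raw branch only fires after the emptiness and refusal checks pass. A hypothetical helper, not part of the commit, that makes the missing-marker case explicit instead:

def extract_answer(raw: str, marker: str = "ANSWER:") -> str | None:
    # Hypothetical helper: return the text after the last marker, or None.
    # str.split(marker)[-1] yields the whole input when the marker is
    # absent, which is why the committed code needs a separate
    # elif "ANSWER:" not in raw check after the fact.
    if marker not in raw:
        return None
    return raw.split(marker)[-1].strip()

For example, extract_answer("Some reasoning...\nANSWER: 42") returns "42", while a response without the marker returns None, letting the caller branch once instead of three times.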