Essi committed on
Commit
cd58f1c
·
1 Parent(s): 9df38aa

refactor: Update GAIAAgent workflow and improve context handling in processing

Browse files
Files changed (1) hide show
  1. app.py +17 -15
app.py CHANGED
@@ -85,10 +85,8 @@ def web_search(query: str) -> str:
85
  # HELPER FUNCTIONS #
86
  # --------------------------------------------------------------------------- #
87
  def _needs_calc(q: str) -> bool:
88
- return bool(
89
- re.search(r"\b(percent|ratio|average|difference|total)\b", q.lower())
90
- or re.search(r"[\+\-\*/]", q)
91
- )
92
 
93
 
94
  def _extract_search_terms(question: str) -> str:
@@ -104,10 +102,9 @@ def _summarize_results(results_json: str, max_hits: int = 3) -> str:
104
  """Turn JSON list of hits into a compact text context for the LLM."""
105
  try:
106
  hits = json.loads(results_json)[:max_hits]
 
107
  except Exception:
108
  return ""
109
- bullets = [f"- {h['title']}: {h['snippet']}" for h in hits]
110
- return "\n".join(bullets)
111
 
112
  # --------------------------------------------------------------------------- #
113
  # ------------------------------- AGENT STATE ----------------------------- #
@@ -128,8 +125,8 @@ class GAIAAgent:
128
  """LangGraph-powered agent targeting GAIA Level-1 tasks."""
129
 
130
  SYSTEM_PROMPT = (
131
- "You are an expert question-answering agent. "
132
- "Return ONLY the final answer—no rationale, no extra words."
133
  )
134
 
135
  def __init__(self):
@@ -156,15 +153,15 @@ class GAIAAgent:
156
 
157
  # Add nodes
158
  workflow.add_node("analyze_question", self._analyze_question)
159
- workflow.add_node("search_or_calc", self._search_or_calc)
160
  workflow.add_node("process_info", self._process_info)
161
  workflow.add_node("generate_answer", self._generate_answer)
162
  workflow.add_node("normalize_answer", self._normalize_answer)
163
 
164
  # Add edges
165
  workflow.set_entry_point("analyze_question")
166
- workflow.add_edge("analyze_question", "search_or_calc")
167
- workflow.add_edge("search_or_calc", "process_info")
168
  workflow.add_edge("process_info", "generate_answer")
169
  workflow.add_edge("generate_answer", "normalize_answer")
170
  workflow.add_edge("normalize_answer", END)
@@ -178,7 +175,7 @@ class GAIAAgent:
178
  state["reasoning_steps"] = [f"Analyze: {q[:60]}"]
179
  return state
180
 
181
- def _search_or_calc(self, state: AgentState) -> AgentState:
182
  q = state["question"]
183
 
184
  # 1️⃣ Calculator path
@@ -207,10 +204,15 @@ class GAIAAgent:
207
  def _process_info(self, state: AgentState) -> AgentState:
208
  if state["answer"]:
209
  # If calc already produced an answer, just pass through
 
210
  return state
211
 
212
  # Summarize search results for the LLM
213
- state["context"] = _summarize_results(state["search_results"])
 
 
 
 
214
  state["reasoning_steps"].append("Process")
215
  return state
216
 
@@ -229,8 +231,8 @@ class GAIAAgent:
229
  )
230
  ),
231
  ]
232
- rsp = self.llm.invoke(prompt)
233
- state["answer"] = rsp.content.strip()
234
  state["reasoning_steps"].append("Generate Answer")
235
  return state
236
 
 
85
  # HELPER FUNCTIONS #
86
  # --------------------------------------------------------------------------- #
87
  def _needs_calc(q: str) -> bool:
88
+ math_expr = re.compile(r"^\s*[\d\.\s\+\-\*/\(\)]+?\s*$")
89
+ return bool(math_expr.match(q))
 
 
90
 
91
 
92
  def _extract_search_terms(question: str) -> str:
 
102
  """Turn JSON list of hits into a compact text context for the LLM."""
103
  try:
104
  hits = json.loads(results_json)[:max_hits]
105
+ return "\n".join(f"- {h['title']}: {h['snippet']}" for h in hits)
106
  except Exception:
107
  return ""
 
 
108
 
109
  # --------------------------------------------------------------------------- #
110
  # ------------------------------- AGENT STATE ----------------------------- #
 
125
  """LangGraph-powered agent targeting GAIA Level-1 tasks."""
126
 
127
  SYSTEM_PROMPT = (
128
+ "You are an expert answer bot. "
129
+ "Return ONLY the final answer string—no extra words."
130
  )
131
 
132
  def __init__(self):
 
153
 
154
  # Add nodes
155
  workflow.add_node("analyze_question", self._analyze_question)
156
+ workflow.add_node("route", self._route)
157
  workflow.add_node("process_info", self._process_info)
158
  workflow.add_node("generate_answer", self._generate_answer)
159
  workflow.add_node("normalize_answer", self._normalize_answer)
160
 
161
  # Add edges
162
  workflow.set_entry_point("analyze_question")
163
+ workflow.add_edge("analyze_question", "route")
164
+ workflow.add_edge("route", "process_info")
165
  workflow.add_edge("process_info", "generate_answer")
166
  workflow.add_edge("generate_answer", "normalize_answer")
167
  workflow.add_edge("normalize_answer", END)
 
175
  state["reasoning_steps"] = [f"Analyze: {q[:60]}"]
176
  return state
177
 
178
+ def _route(self, state: AgentState) -> AgentState:
179
  q = state["question"]
180
 
181
  # 1️⃣ Calculator path
 
204
  def _process_info(self, state: AgentState) -> AgentState:
205
  if state["answer"]:
206
  # If calc already produced an answer, just pass through
207
+ state["context"] = ""
208
  return state
209
 
210
  # Summarize search results for the LLM
211
+ summary = _summarize_results(state["search_results"])
212
+ if not summary:
213
+ summary = "No useful web context found. Rely on your prior knowledge."
214
+
215
+ state["context"] = summary
216
  state["reasoning_steps"].append("Process")
217
  return state
218
 
 
231
  )
232
  ),
233
  ]
234
+ response = self.llm.invoke(prompt)
235
+ state["answer"] = response.content.strip()
236
  state["reasoning_steps"].append("Generate Answer")
237
  return state
238