naman1102 committed
Commit 1805291 · 1 Parent(s): 2200521

Update app.py

Files changed (1):
  app.py +23 -18
app.py CHANGED
@@ -135,26 +135,31 @@ class BasicAgent:
         return text.strip()
 
     def _generate_answer(self, state: AgentState) -> AgentState:
-        # Format search results for better LLM consumption
-        search_results = []
-        for item in state["history"]:
-            if item.get("step") == "search" and "results" in item:
-                for i, result in enumerate(item["results"], 1):
-                    search_results.append(f"Result {i}:\n{result}\n")
+        # Get the last search results
+        search_block = "\n".join(state["history"][-1]["results"])  # last search step
 
-        history_text = "\n".join(search_results) if search_results else "No search results found."
+        prompt = f"""
+You are an expert fact-extractor. Using ONLY the text below, answer the question.
+
+Question:
+{state['question']}
+
+Search snippets (bold terms are highlighted):
+{search_block}
+
+Think step-by-step. Quote exact numbers/names if needed.
+END EACH STEP with ➤. After reasoning, output:
+
+ANSWER: <the short answer here>
+
+No other text.
+"""
+        raw = self._call_llm(prompt, 300)
+        answer = raw.splitlines()[-1].replace("ANSWER:", "").strip()
 
-        prompt = (
-            f"Answer the user question as directly as possible using the search results below.\n"
-            f"Question: {state['question']}\n\n"
-            f"Search Results:\n{history_text}\n\n"
-            "Give ONLY the final answer without extra formatting or explanation.\n"
-            "If you cannot find a definitive answer in the search results, say 'I cannot find a definitive answer to this question.'"
-        )
-        answer = self._call_llm(prompt, max_tokens=150)
-        state["final_answer"] = answer.strip()
-        state["history"].append({"step": "answer", "output": answer})
-        state["logs"]["final_answer"] = {"prompt": prompt, "response": answer}
+        state["final_answer"] = answer
+        state["history"].append({"step": "answer", "output": raw})  # Store full response for debugging
+        state["logs"]["final_answer"] = {"prompt": prompt, "response": raw}
         state["current_step"] = "done"
         return state
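
Note that the new extraction, `raw.splitlines()[-1].replace("ANSWER:", "").strip()`, assumes the model's output always ends with the `ANSWER:` line; if the model appends anything after it, the parsed answer is wrong. A minimal defensive sketch of the same extraction — the `extract_answer` helper name is hypothetical and not part of this commit:

    def extract_answer(raw: str) -> str:
        # Scan from the end so trailing blank lines or stray text
        # after the ANSWER line don't break the parse.
        for line in reversed(raw.splitlines()):
            line = line.strip()
            if line.startswith("ANSWER:"):
                return line[len("ANSWER:"):].strip()
        # No ANSWER marker found: fall back to the whole stripped output.
        return raw.strip()

    # Example: extract_answer("step 1 ➤\nstep 2 ➤\nANSWER: 42") -> "42"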