Update app.py
app.py CHANGED
@@ -21,13 +21,13 @@ HF_TOKEN = os.getenv("HF_TOKEN") # Make sure to set this environment variable
 
 
 class AgentState(TypedDict):
-    question: Annotated[
-    current_step: Annotated[str,
+    question: Annotated[str, operator.or_]
+    current_step: Annotated[str, operator.or_]
     tool_output: Annotated[str, "input"]
     final_answer: Annotated[str, "input"]
     history: Annotated[List[Dict[str, str]], operator.add]
     needs_more_info: Annotated[bool, "input"]
-    search_query: Annotated[str,
+    search_query: Annotated[str, operator.or_]
 
 class BasicAgent:
     def __init__(self):
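For reference, here is the updated state schema as one self-contained block. The field names, annotations, and reducers are copied from the hunk above; the import lines are the usual typing/operator imports and are an assumption, since they are not part of this diff:

import operator
from typing import Annotated, Dict, List, TypedDict

class AgentState(TypedDict):
    question: Annotated[str, operator.or_]      # now a single string rather than a list
    current_step: Annotated[str, operator.or_]
    tool_output: Annotated[str, "input"]
    final_answer: Annotated[str, "input"]
    history: Annotated[List[Dict[str, str]], operator.add]  # step records accumulate
    needs_more_info: Annotated[bool, "input"]
    search_query: Annotated[str, operator.or_]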
@@ -103,10 +103,7 @@
 
     def _analyze_question(self, state: AgentState) -> AgentState:
         """Analyze the question and determine the next step."""
-
-        current_question = state["question"][-1]
-
-        prompt = f"""Analyze this question and determine what needs to be done: {current_question}
+        prompt = f"""Analyze this question and determine what needs to be done: {state['question']}
 
 Return only a valid Python dictionary in this exact format:
 {{
@@ -130,7 +127,7 @@ Do not include any other text or explanation. Only return the dictionary.
             print(f"Error parsing LLM response: {e}")
             # Default to search if we can't parse the response
             state["needs_more_info"] = True
-            state["search_query"] =
+            state["search_query"] = state["question"]
             state["current_step"] = 'search'
 
         return state
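The except branch above now simply copies the whole question string into search_query. The diff does not show how the LLM's reply is parsed before that fallback fires; the following is a minimal sketch of the same try/except pattern, using ast.literal_eval purely as an illustrative stand-in for the real parsing code in app.py:

import ast

def parse_llm_decision(raw: str, question: str) -> dict:
    try:
        decision = ast.literal_eval(raw.strip())
        if not isinstance(decision, dict):
            raise ValueError("LLM did not return a dictionary")
        return decision
    except (ValueError, SyntaxError) as e:
        print(f"Error parsing LLM response: {e}")
        # Same fallback as the hunk above: default to searching for the raw question
        return {"needs_more_info": True, "search_query": question, "current_step": "search"}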
@@ -161,13 +158,10 @@ Do not include any other text or explanation. Only return the dictionary.
 
     def _generate_final_answer(self, state: AgentState) -> AgentState:
         """Generate the final answer based on all gathered information."""
-        # Get the most recent question from the list
-        current_question = state["question"][-1]
-
         history_str = "\n".join([f"{h['step']}: {h.get('output', h.get('results', h.get('error', '')))}"
                                 for h in state["history"]])
 
-        prompt = f"""Based on the following information and history, provide a final answer to the question: {
+        prompt = f"""Based on the following information and history, provide a final answer to the question: {state['question']}
 
 History of steps taken:
 {history_str}
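To make the history_str formatting above concrete, here is what it produces for a small sample history (the sample entries are invented for demonstration; only the join expression comes from app.py):

sample_history = [
    {"step": "analyze", "output": "needs a web search"},
    {"step": "search", "results": "top result: ..."},
    {"step": "search", "error": "rate limited"},
]
history_str = "\n".join([f"{h['step']}: {h.get('output', h.get('results', h.get('error', '')))}"
                         for h in sample_history])
print(history_str)
# analyze: needs a web search
# search: top result: ...
# search: rate limited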
@@ -185,7 +179,7 @@ Do not include any other text or explanation. Only return the dictionary.
         try:
             # Initialize the state
             initial_state: AgentState = {
-                "question":
+                "question": question, # Now a string, not a list
                 "current_step": "analyze",
                 "tool_output": "",
                 "final_answer": "",
@@ -270,7 +264,7 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
         try:
             # Initialize the state for this question
             initial_state = {
-                "question":
+                "question": question_text, # Now a string, not a list
                 "current_step": "analyze",
                 "tool_output": "",
                 "final_answer": "",
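Both call sites in the diff now seed the state with a plain question string. A hedged sketch of that shape, reusing the AgentState definition above; the fields past "final_answer" are cut off in the hunks, so their defaults here, and the helper name itself, are assumptions:

def make_initial_state(question_text: str) -> AgentState:
    return {
        "question": question_text,   # a plain string now, not a list
        "current_step": "analyze",
        "tool_output": "",
        "final_answer": "",
        "history": [],               # assumed default: no steps recorded yet
        "needs_more_info": False,    # assumed default
        "search_query": "",          # assumed default
    }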