File size: 19,446 Bytes
c922f8b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
058f0b3
 
 
 
 
 
 
 
c922f8b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
058f0b3
c922f8b
 
 
058f0b3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c922f8b
058f0b3
 
 
 
c922f8b
 
058f0b3
 
c922f8b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
058f0b3
c922f8b
058f0b3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c922f8b
 
058f0b3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c922f8b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60e4487
 
c922f8b
 
 
 
 
 
058f0b3
c922f8b
 
 
 
058f0b3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c922f8b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
058f0b3
60e4487
058f0b3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c922f8b
058f0b3
c922f8b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60e4487
 
c922f8b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60e4487
c922f8b
 
 
 
 
 
60e4487
 
 
 
c922f8b
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
"""
LangGraph workflow definition for the GAIA agent.

This module defines the workflow graph for the GAIA agent using LangGraph.
It implements nodes for question analysis, planning, tool selection,
tool execution, and answer formulation, as well as the edges between them.

The graph is designed to be integrated with the GAIAAgent class and can be
extended with additional nodes and edges as needed.
"""

import logging
import json
import re
import traceback
import openai
from typing import Dict, Any, List, Tuple, Optional, Annotated, TypedDict, Literal, Union, cast

from langchain.prompts import PromptTemplate
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.runnables import RunnableConfig
from langchain_core.output_parsers import StrOutputParser

from langgraph.graph import StateGraph, END

from src.gaia.agent.config import get_model_config, get_agent_config, VERBOSE
from src.gaia.agent.tool_registry import (
    ToolRegistry,
    create_default_registry,
    search,
    analyze_query,
    resolve_question_type
)
from src.gaia.memory import WorkingMemory, ResultCache

logger = logging.getLogger("gaia_agent.graph")

class QuestionAnalysis(TypedDict):
    """Type for question analysis results."""
    question_type: str
    required_tools: List[str]
    information_sources: List[str]
    complexity: str

class PlanStep(TypedDict):
    """Type for a single step in the plan."""
    step_number: int
    description: str
    tool: Optional[str]
    tool_input: Optional[Dict[str, Any]]

class ToolResult(TypedDict):
    """Type for tool execution results."""
    tool_name: str
    success: bool
    result: Any
    error: Optional[str]

class AgentState(TypedDict):
    """Type for agent state."""
    question: str
    analysis: Optional[QuestionAnalysis]
    plan: Optional[List[PlanStep]]
    current_step: Optional[int]
    tool_results: Optional[List[ToolResult]]
    reasoning: Optional[str]
    answer: Optional[str]

def analyze_question(state: AgentState) -> AgentState:
    """
    Analyze the question to determine its type and required tools.
    
    Args:
        state: Current agent state
        
    Returns:
        Updated agent state with analysis
    """
    logger.info("Analyzing question")
    
    question = state["question"]
    
    # Detect question type using the tool_registry analyze function
    question_analysis = analyze_query(question)
    question_type = resolve_question_type(question)
    
    # Determine required tools based on question analysis
    required_tools = []
    
    # Web search is needed for most questions
    required_tools.append("web_search")
    
    # Add specific tools based on question type
    if "video" in question.lower() or "youtube" in question.lower():
        required_tools.append("youtube_video")
    
    if "article" in question.lower() or "paper" in question.lower() or "research" in question.lower():
        required_tools.append("arxiv_search")
    
    # For complex questions, add perplexity
    if question_analysis["depth_needed"] == "high":
        required_tools.append("perplexity_search")
    
    # Check for reversed text
    if question_type == "reversed_text":
        required_tools.append("text_manipulation")
    
    # Create analysis object
    analysis = {
        "question_type": question_type,
        "required_tools": required_tools,
        "information_sources": question_analysis["preferred_sources"],
        "complexity": question_analysis["depth_needed"]
    }
    
    logger.info(f"Question analysis: {analysis}")
    
    # Update state
    return {
        **state,
        "analysis": analysis
    }

def create_plan(state: AgentState) -> AgentState:
    """
    Create a plan for answering the question.
    
    Args:
        state: Current agent state
        
    Returns:
        Updated agent state with plan
    """
    logger.info("Creating plan")
    
    question = state["question"]
    analysis = state["analysis"]
    
    if not analysis:
        logger.warning("No question analysis available, creating default plan")
        analysis = {
            "question_type": "factual",
            "required_tools": ["web_search"],
            "information_sources": ["web"],
            "complexity": "medium"
        }
    
    # Create plan based on question type and required tools
    plan = []
    step_num = 1
    
    # Handle reversed text questions
    if analysis["question_type"] == "reversed_text":
        # If text is reversed, handle it first
        plan.append({
            "step_number": step_num,
            "description": "Reverse the text in the question to understand it",
            "tool": "text_manipulation",
            "tool_input": {"text": question, "operation": "reverse"}
        })
        step_num += 1
    
    # Web search is the primary tool for most questions
    if "web_search" in analysis["required_tools"]:
        plan.append({
            "step_number": step_num,
            "description": "Search for information using web search",
            "tool": "web_search",
            "tool_input": {"query": question}
        })
        step_num += 1
    
    # Add specialized tools as needed
    if "youtube_video" in analysis["required_tools"]:
        plan.append({
            "step_number": step_num,
            "description": "Analyze video content using YouTube tool",
            "tool": "youtube_video",
            "tool_input": {"query": question}
        })
        step_num += 1
    
    if "arxiv_search" in analysis["required_tools"]:
        plan.append({
            "step_number": step_num,
            "description": "Search for academic papers on arXiv",
            "tool": "arxiv_search",
            "tool_input": {"query": question}
        })
        step_num += 1
    
    if "perplexity_search" in analysis["required_tools"]:
        plan.append({
            "step_number": step_num,
            "description": "Use Perplexity for in-depth information",
            "tool": "perplexity_search",
            "tool_input": {"query": question}
        })
        step_num += 1
    
    # Always add analysis and answer formulation step
    plan.append({
        "step_number": step_num,
        "description": "Analyze all retrieved information and formulate answer",
        "tool": None,
        "tool_input": None
    })
    
    logger.info(f"Created plan with {len(plan)} steps")
    
    # Update state
    return {
        **state,
        "plan": plan,
        "current_step": 0,
        "tool_results": []  # Initialize tool_results as an empty list
    }

def execute_tool(state: AgentState) -> AgentState:
    """
    Execute the current tool in the plan.
    
    Args:
        state: Current agent state
        
    Returns:
        Updated agent state with tool results
    """
    current_step = state["current_step"]
    plan = state["plan"]
    
    if current_step is None or plan is None or current_step >= len(plan):
        return state
    
    step = plan[current_step]
    tool_name = step.get("tool")
    
    logger.info(f"Executing step {current_step}: {step}")
    
    if not tool_name:
        return {
            **state,
            "current_step": current_step + 1
        }
    
    tool_input = step.get("tool_input", {})
    
    # Get existing tool results or initialize as empty list if None
    tool_results = state.get("tool_results", []) or []
    
    try:
        logger.info(f"Executing tool: {tool_name}")
        
        # Create tool registry
        registry = create_default_registry()
        
        # Execute the appropriate tool
        result = None
        if tool_name == "web_search":
            query = tool_input.get("query", state["question"])
            search_results = search(registry, query, format_type="unified")
            result = search_results
        
        elif tool_name == "youtube_video":
            query = tool_input.get("query", state["question"])
            # Extract YouTube URL
            youtube_urls = re.findall(r'https?://(?:www\.)?youtube\.com/watch\?v=[\w-]+', query)
            if not youtube_urls:
                youtube_urls = re.findall(r'https?://(?:www\.)?youtu\.be/[\w-]+', query)
            
            if youtube_urls:
                video_url = youtube_urls[0]
                if registry.get_tool("youtube_video"):
                    result = registry.execute_tool("youtube_video", url=video_url)
                else:
                    raise Exception("YouTube video tool not available")
            else:
                raise Exception("No YouTube URL found in query")
        
        elif tool_name == "arxiv_search":
            query = tool_input.get("query", state["question"])
            if registry.get_tool("arxiv_search"):
                result = registry.execute_tool("arxiv_search", query=query)
            else:
                raise Exception("ArXiv search tool not available")
        
        elif tool_name == "perplexity_search":
            query = tool_input.get("query", state["question"])
            if registry.get_tool("perplexity_search"):
                result = registry.execute_tool("perplexity_search", query=query)
            else:
                raise Exception("Perplexity search tool not available")
                
        elif tool_name == "text_manipulation":
            text = tool_input.get("text", state["question"])
            operation = tool_input.get("operation", "reverse")
            
            if operation == "reverse":
                result = {'reversed_text': text[::-1]}
            else:
                result = {'original_text': text}
        
        # Create tool result object
        tool_result = {
            "tool_name": tool_name,
            "success": True,
            "result": result,
            "error": None
        }
        
        logger.info(f"Tool {tool_name} executed successfully")
        
    except Exception as e:
        logger.error(f"Error executing tool {tool_name}: {str(e)}")
        logger.debug(traceback.format_exc())
        
        # Record the error
        tool_result = {
            "tool_name": tool_name,
            "success": False,
            "result": None,
            "error": str(e)
        }
    
    # Update state
    return {
        **state,
        "tool_results": tool_results + [tool_result],
        "current_step": current_step + 1
    }

def formulate_answer(state: AgentState) -> AgentState:
    """
    Formulate an answer based on tool results and reasoning.
    
    Args:
        state: Current agent state
        
    Returns:
        Updated agent state with answer
    """
    logger.info("Formulating answer")
    logger.info(f"State: {state}")
    
    question = state["question"]
    tool_results = state["tool_results"] or []
    
    # Extract relevant information from tool results
    web_search_data = []
    youtube_data = None
    arxiv_data = []
    perplexity_data = None
    reversed_text = None
    
    for tool_result in tool_results:
        if not tool_result["success"]:
            continue
        
        if tool_result["tool_name"] == "web_search":
            result = tool_result["result"]
            if result and "results" in result:
                web_search_data = result["results"]
        
        elif tool_result["tool_name"] == "youtube_video":
            youtube_data = tool_result["result"]
        
        elif tool_result["tool_name"] == "arxiv_search":
            arxiv_data = tool_result["result"]
        
        elif tool_result["tool_name"] == "perplexity_search":
            result = tool_result["result"]
            if result and "content" in result:
                perplexity_data = result["content"]
        
        elif tool_result["tool_name"] == "text_manipulation":
            result = tool_result["result"]
            if result and "reversed_text" in result:
                reversed_text = result["reversed_text"]
    
    # First check if this is a reversed text question
    if reversed_text:
        logger.info("Processing reversed text question")
        # The reversed text is the original question in the correct order
        # Parse it to extract what's being asked
        original_question = reversed_text
        
        # Example pattern: "If you understand this sentence, write the opposite of the word "left" as the answer."
        # Look for common patterns in reversed text questions
        if "opposite" in original_question and "word" in original_question:
            # Extract the word to find the opposite of
            match = re.search(r'opposite of the word (?:"|")?(\w+)(?:"|")?', original_question)
            if match:
                word = match.group(1).lower()
                
                # Define common opposites
                opposites = {
                    "left": "right",
                    "right": "left",
                    "up": "down",
                    "down": "up",
                    "black": "white",
                    "white": "black",
                    "yes": "no",
                    "no": "yes",
                    "hot": "cold",
                    "cold": "hot",
                    "big": "small",
                    "small": "big",
                    "tall": "short",
                    "short": "tall"
                }
                
                if word in opposites:
                    answer = opposites[word]
                    return {
                        **state,
                        "reasoning": f"This is a reversed text question asking for the opposite of '{word}'.",
                        "answer": answer
                    }
    
    # Now process normally based on collected data
    reasoning = "Based on the information I've gathered:\n\n"
    
    # Process web search data
    if web_search_data:
        reasoning += "From web search results:\n"
        for i, result in enumerate(web_search_data[:3]):
            reasoning += f"- {result.get('title', 'Untitled')}: {result.get('snippet', 'No snippet')}...\n"
    
    # Process YouTube data
    if youtube_data:
        reasoning += "\nFrom the YouTube video:\n"
        if isinstance(youtube_data, dict):
            reasoning += f"- Title: {youtube_data.get('title', 'Unknown')}\n"
            reasoning += f"- Content: {youtube_data.get('content', 'No content extracted')}...\n"
        elif isinstance(youtube_data, str):
            reasoning += f"- Content: {youtube_data}...\n"
    
    # Process arXiv data
    if arxiv_data:
        reasoning += "\nFrom academic papers:\n"
        for i, paper in enumerate(arxiv_data[:2]):
            if isinstance(paper, dict):
                reasoning += f"- {paper.get('title', 'Untitled')}: {paper.get('summary', 'No summary')[:100]}...\n"
    
    # Process Perplexity data
    if perplexity_data:
        reasoning += "\nFrom Perplexity AI:\n"
        reasoning += f"{perplexity_data[:500]}...\n"
    
    # Formulate answer based on collected data
    answer = ""
    
    # If we have Perplexity data, it's often comprehensive enough
    if perplexity_data:
        # Extract the most relevant parts of the perplexity answer
        answer = perplexity_data.split('\n')[0]  # First paragraph
        
        # For specific question types, extract the specific answer
        if "how many" in question.lower():
            # Look for numbers in the perplexity data
            numbers = re.findall(r'\d+', perplexity_data)
            if numbers:
                # Typically the first number is the answer
                answer = numbers[0]
        
        elif "who" in question.lower():
            # Look for names (capitalized words)
            names = re.findall(r'[A-Z][a-z]+ [A-Z][a-z]+', perplexity_data)
            if names:
                answer = names[0]
    
    # If no answer yet, use web search data
    if not answer and web_search_data:
        # Extract the most relevant information from the top result
        if web_search_data:
            top_result = web_search_data[0]
            answer = top_result.get('snippet', '')
    
    # If we have YouTube data and the question is about a video
    if "video" in question.lower() and youtube_data:
        if isinstance(youtube_data, dict) and "content" in youtube_data:
            answer = youtube_data["content"]
    
    # If still no answer, provide a generic response
    if not answer:
        answer = "I couldn't find enough information to answer your question accurately."
    
    # Ensure we have a proper answer
    answer = answer.strip()
    if not answer.endswith((".", "!", "?")):
        answer += "."
    
    logger.info("Answer formulation complete")
    
    # Update state
    return {
        **state,
        "reasoning": reasoning,
        "answer": answer
    }

def should_continue(state: AgentState) -> Literal["continue", "complete"]:
    """
    Determine if the agent should continue executing the plan or is done.
    
    Args:
        state: Current agent state
        
    Returns:
        "continue" if there are more steps to execute, "complete" otherwise
    """
    current_step = state.get("current_step")
    plan = state.get("plan", [])
    
    if current_step is None:
        return "continue"
    
    if current_step < len(plan):
        return "continue"
    
    return "complete"

def run_agent_graph(initial_state: Dict[str, Any], config: Dict[str, Any]) -> Dict[str, Any]:
    """
    Run the agent graph with the initial state.
    
    Args:
        initial_state: Initial state dictionary, must include "question"
        config: Agent configuration dictionary
        
    Returns:
        Final state with answer and reasoning
    """
    logger.info("Running agent graph")
    logger.info(f"Initial state: {initial_state}")
    logger.info(f"Config: {config}")
    
    try:
        # Create initial state
        state: AgentState = {
            "question": initial_state.get("question", ""),
            "analysis": None,
            "plan": None,
            "current_step": None,
            "tool_results": [],  # Initialize as empty list rather than None
            "reasoning": None,
            "answer": None
        }
        
        # Define the graph
        state = analyze_question(state)
        state = create_plan(state)
        
        # Execute tools until done
        while should_continue(state) == "continue":
            state = execute_tool(state)
        
        state = formulate_answer(state)
        
        logger.info("Agent graph completed successfully")
        
        # Return the final state
        final_result = {
            "answer": state.get("answer", "I couldn't find an answer to your question."),
            "reasoning": state.get("reasoning", ""),
            "plan": state.get("plan", []),
            "tool_results": state.get("tool_results", [])
        }
        
        logger.info(f"Final result: {final_result['answer'][:100]}...")
        
        return final_result
        
    except Exception as e:
        logger.error(f"Error running agent graph: {str(e)}")
        logger.debug(traceback.format_exc())
        
        return {
            "answer": f"I encountered an error while processing your question: {str(e)}",
            "reasoning": f"An error occurred: {str(e)}",
            "plan": [],
            "tool_results": []
        }