oremaz committed: Update agent.py
agent.py CHANGED
@@ -16,13 +16,12 @@ from llama_index.tools.arxiv import ArxivToolSpec
 import duckduckgo_search as ddg
 import re
 from llama_index.core.agent.workflow import ReActAgent
-from llama_index.llms.
+from llama_index.llms.openrouter import OpenRouter

-text_llm =
-    model="
-    api_key=os.
+text_llm = OpenRouter(
+    model="mistralai/mistral-small-3.1-24b-instruct:free",  # as listed on OpenRouter
+    api_key=os.getenv("OPENROUTER_API_KEY"),  # or pass your key directly
 )
-
 multimodal_llm = text_llm


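The hunk above switches the script's LLM to OpenRouter's hosted Mistral model through LlamaIndex's OpenRouter integration. Below is a minimal sketch of that setup in isolation; it assumes the `llama-index-llms-openrouter` package is installed and `OPENROUTER_API_KEY` is set, and the `complete()` call at the end is only an illustrative smoke test, not part of the commit.

```python
import os

from llama_index.llms.openrouter import OpenRouter

# Same construction as in the diff: model id as listed on OpenRouter,
# API key read from the environment (or passed directly).
text_llm = OpenRouter(
    model="mistralai/mistral-small-3.1-24b-instruct:free",
    api_key=os.getenv("OPENROUTER_API_KEY"),
)

# agent.py reuses the same instance for "multimodal" work.
multimodal_llm = text_llm

if __name__ == "__main__":
    # Illustrative smoke test, not in the commit: one completion round-trip.
    print(text_llm.complete("Reply with the single word: ready").text)
```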
@@ -217,6 +216,8 @@ analysis_agent = FunctionAgent(
     """,
     llm=multimodal_llm,
     tools=[enhanced_rag_tool, cross_document_tool],
+    max_steps=5,
+
 )


@@ -308,43 +309,12 @@ def enhanced_smart_research_tool(query: str, task_context: str = "", max_results
     full_query = f"{query} {task_context}".strip()
     return intelligent_router.detect_intent_and_route(full_query)

-enhanced_research_tool_func = FunctionTool.from_defaults(
+research_tool = FunctionTool.from_defaults(
     fn=enhanced_smart_research_tool,
     name="Enhanced Research Tool",
     description="Intelligent research tool that discriminates between scientific (ArXiv) and general (web) research with deep content extraction"
 )

-# Updated research agent
-research_agent = FunctionAgent(
-    name="ResearchAgent",
-    description="Advanced research agent that automatically routes between scientific and general research sources",
-    system_prompt="""
-    You are an advanced research specialist that automatically discriminates between:
-
-    **Scientific Research** → ArXiv
-    - Academic papers, research studies
-    - Technical algorithms and methods
-    - Scientific experiments and theories
-
-    **General Research** → Web Search with Content Extraction
-    - Current events and news
-    - General factual information
-    - How-to guides and technical documentation
-    - Weather, locations, biographical info
-
-    You automatically:
-    1. **Route queries** to the most appropriate source
-    2. **Extract deep content** from web pages (not just snippets)
-    3. **Analyze and synthesize** information comprehensively
-    4. **Provide detailed answers** with source attribution
-
-    Always focus on extracting the most relevant information for the GAIA task.
-    """,
-    llm=text_llm,
-    tools=[enhanced_research_tool_func],
-)
-
-
 def execute_python_code(code: str) -> str:
     try:
         safe_globals = {
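This hunk drops the separate ResearchAgent and exposes the research function directly as a `FunctionTool`. The sketch below shows the same `FunctionTool.from_defaults` pattern with a stand-in function, since `enhanced_smart_research_tool` and `intelligent_router` are defined elsewhere in agent.py; the stub and its return value are illustrative only.

```python
from llama_index.core.tools import FunctionTool

def smart_research_stub(query: str, task_context: str = "") -> str:
    """Hypothetical stand-in for enhanced_smart_research_tool."""
    full_query = f"{query} {task_context}".strip()
    return f"[research results for: {full_query}]"

research_tool = FunctionTool.from_defaults(
    fn=smart_research_stub,
    name="Enhanced Research Tool",
    description="Routes between scientific (ArXiv) and general (web) research with deep content extraction",
)

# The orchestrator can now call the function as a tool directly,
# without routing through a dedicated ResearchAgent.
print(research_tool.metadata.name)
```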
@@ -392,6 +362,7 @@ code_agent = ReActAgent(
     """,
     llm=text_llm,
     tools=[code_execution_tool],
+    max_steps = 5
 )

 # Create tools from the agents
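The `code_agent` hunk above depends on an `execute_python_code` tool whose body is only partially visible in this diff. Below is a hedged sketch of that kind of restricted-`exec` tool; the builtin allow-list and error handling shown here are assumptions, not the repository's exact code.

```python
import contextlib
import io

from llama_index.core.tools import FunctionTool

def execute_python_code(code: str) -> str:
    """Run a small Python snippet with a restricted set of builtins and return its stdout."""
    safe_globals = {"__builtins__": {"print": print, "range": range, "len": len, "sum": sum}}
    buffer = io.StringIO()
    try:
        with contextlib.redirect_stdout(buffer):
            exec(code, safe_globals, {})
        return buffer.getvalue() or "Code executed successfully"
    except Exception as exc:  # surface errors to the agent as plain text
        return f"Error: {exc}"

code_execution_tool = FunctionTool.from_defaults(
    fn=execute_python_code,
    name="Python Code Execution",
    description="Execute small Python snippets and return their output",
)

# code_agent is then built roughly as in the hunk above:
# code_agent = ReActAgent(name="CodeAgent", llm=text_llm, tools=[code_execution_tool], ...)
```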
@@ -399,9 +370,6 @@ def analysis_function(query: str, files=None):
     ctx = Context(analysis_agent)
     return analysis_agent.run(query, ctx=ctx)

-def research_function(query: str):
-    ctx = Context(research_agent)
-    return research_agent.run(query, ctx=ctx)

 def code_function(query: str):
     ctx = Context(code_agent)
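Note that `analysis_function` returns `analysis_agent.run(query, ctx=ctx)` without awaiting it; with LlamaIndex workflow agents, `run()` returns a handler that has to be awaited to get the final output, and a `Context` carries state between calls. A self-contained sketch of that pattern, using a toy agent and the hypothetical wrapper name `run_analysis` in place of the real `analysis_agent`:

```python
import asyncio
import os

from llama_index.core.agent.workflow import FunctionAgent
from llama_index.core.tools import FunctionTool
from llama_index.core.workflow import Context
from llama_index.llms.openrouter import OpenRouter

def echo(text: str) -> str:
    """Trivial tool so the toy agent has something to call."""
    return text

toy_agent = FunctionAgent(
    name="ToyAnalysisAgent",
    description="Stand-in for analysis_agent",
    system_prompt="Answer briefly.",
    llm=OpenRouter(
        model="mistralai/mistral-small-3.1-24b-instruct:free",
        api_key=os.getenv("OPENROUTER_API_KEY"),
    ),
    tools=[FunctionTool.from_defaults(fn=echo, name="echo", description="Echo a string")],
)

async def run_analysis(query: str) -> str:
    ctx = Context(toy_agent)                      # reuse this ctx to keep chat history
    result = await toy_agent.run(query, ctx=ctx)  # run() must be awaited for the final output
    return str(result)

if __name__ == "__main__":
    print(asyncio.run(run_analysis("Echo the word: ready")))
```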
@@ -413,12 +381,6 @@ analysis_tool = FunctionTool.from_defaults(
     description="Advanced multimodal analysis using enhanced RAG"
 )

-research_tool = FunctionTool.from_defaults(
-    fn=research_function,
-    name="ResearchAgent",
-    description="Research agent for scientific and general research"
-)
-
 code_tool = FunctionTool.from_defaults(
     fn=code_function,
     name="CodeAgent",
@@ -445,9 +407,7 @@ class EnhancedGAIAAgent:
         1. THINK: Analyze the GAIA question thoroughly
         2. ACT: Use your specialist tools IF RELEVANT
         3. OBSERVE: Review results from specialist tools
-        4.
-        5. ACT: Either use another tool or provide final precise answer
-        6. FORMAT: Ensure answer is EXACT GAIA format (number only, word only, etc.)
+        4. REPEAT: Continue until you have the final answer. If you give a final answer, FORMAT: Ensure answer is EXACT GAIA format (number only, word only, etc.)


         IMPORTANT: Use tools strategically - only when their specific expertise is needed.
@@ -460,7 +420,8 @@ class EnhancedGAIAAgent:
         - NO explanations, NO additional text, ONLY the precise answer
         """,
         llm=text_llm,
-        tools=[analysis_tool, research_tool, code_tool]
+        tools=[analysis_tool, research_tool, code_tool],
+        max_steps = 10
         )

     async def solve_gaia_question(self, question_data: Dict[str, Any]) -> str:
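The final hunks hand the orchestrator all three specialist tools (including the new direct `research_tool`) plus a step cap, behind an `async solve_gaia_question` entry point. Below is a hedged, self-contained sketch of that wrapper pattern; the class name, the toy `word_count` tool, and the `question_data["question"]` field are assumptions for illustration, not the repository's implementation.

```python
import asyncio
import os
from typing import Any, Dict

from llama_index.core.agent.workflow import FunctionAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openrouter import OpenRouter

def word_count(text: str) -> int:
    """Toy specialist tool standing in for the analysis/research/code tools."""
    return len(text.split())

class MiniGAIAAgent:
    def __init__(self) -> None:
        llm = OpenRouter(
            model="mistralai/mistral-small-3.1-24b-instruct:free",
            api_key=os.getenv("OPENROUTER_API_KEY"),
        )
        self.agent = FunctionAgent(
            name="Orchestrator",
            description="Routes work to specialist tools and returns a short, exact answer",
            system_prompt="Answer with ONLY the precise answer: a number, a word, or a short phrase.",
            llm=llm,
            tools=[FunctionTool.from_defaults(fn=word_count, name="word_count",
                                              description="Count words in a text")],
        )

    async def solve_gaia_question(self, question_data: Dict[str, Any]) -> str:
        result = await self.agent.run(question_data["question"])
        return str(result).strip()

if __name__ == "__main__":
    demo = {"question": "How many words are in 'to be or not to be'? Answer with the number only."}
    print(asyncio.run(MiniGAIAAgent().solve_gaia_question(demo)))
```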