oremaz committed
Update agent.py
agent.py CHANGED
@@ -13,8 +13,6 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 # LlamaIndex core imports
 from llama_index.core import VectorStoreIndex, Document, Settings
 from llama_index.core.agent.workflow import FunctionAgent, ReActAgent, AgentStream
-from llama_index.core.callbacks.base import CallbackManager
-from llama_index.core.callbacks.llama_debug import LlamaDebugHandler
 from llama_index.core.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, UnstructuredElementNodeParser
 from llama_index.core.postprocessor import SentenceTransformerRerank
 from llama_index.core.query_engine import RetrieverQueryEngine
@@ -55,7 +53,11 @@ import sys
 
 import weave
 weave.init("gaia-llamaindex-agents")
+from llama_index.core.global_handler import set_global_handler
+from llama_index.core.callbacks import WeaveCallbackHandler
 
+# Set Weave as the global callback handler
+set_global_handler(WeaveCallbackHandler())
 
 def get_max_memory_config(max_memory_per_gpu):
     """Generate max_memory config for available GPUs"""
@@ -101,8 +103,6 @@ embed_model = HuggingFaceEmbedding(
         "low_cpu_mem_usage": True,  # Still get memory optimization
     }
 )
-llama_debug = LlamaDebugHandler(print_trace_on_end=True)
-callback_manager = CallbackManager([llama_debug])
 
 logging.basicConfig(level=logging.INFO)
 logging.getLogger("llama_index.core.agent").setLevel(logging.DEBUG)
@@ -110,7 +110,6 @@ logging.getLogger("llama_index.llms").setLevel(logging.DEBUG)
 
 Settings.llm = proj_llm
 Settings.embed_model = embed_model
-Settings.callback_manager = callback_manager
 
 def read_and_parse_content(input_path: str) -> List[Document]:
     """
@@ -607,9 +606,7 @@ class EnhancedGAIAAgent:
             ],
             llm=proj_llm,
             max_steps=8,
-            verbose=True
-            callback_manager=callback_manager,
-        )
+            verbose=True)
 
         self.code_agent = ReActAgent(
             name="code_agent",
@@ -618,9 +615,7 @@ class EnhancedGAIAAgent:
             tools=[code_execution_tool],
             llm=code_llm,
             max_steps=6,
-            verbose=True
-            callback_manager=callback_manager,
-        )
+            verbose=True)
 
         # Fixed indentation: coordinator initialization inside __init__
         self.coordinator = AgentWorkflow(
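These hunks drop LlamaIndex's per-component callback wiring: a LlamaDebugHandler wrapped in a CallbackManager, installed on Settings.callback_manager, and passed again to each agent. For reference, a minimal self-contained sketch of the pattern being removed, using the consolidated llama_index.core.callbacks import path that current LlamaIndex releases document (a sketch, not the Space's exact code):

# Sketch of the debug-callback wiring this commit removes.
# LlamaDebugHandler records events and prints a trace after each query;
# registering it on Settings.callback_manager applies it to every
# component, so per-agent callback_manager arguments become unnecessary.
from llama_index.core import Settings
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler

llama_debug = LlamaDebugHandler(print_trace_on_end=True)
Settings.callback_manager = CallbackManager([llama_debug])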