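"""Gradio chat UI for a smolagents planning/SWE agent pair that edits Hugging Face Spaces.

Streams agent steps (planning, tool calls, final answers) into a Gradio chatbot,
manages a per-session temporary workspace keyed by Space ID, and handles file uploads.
"""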
import os
import re
import shutil
import tempfile
from pathlib import Path
from typing import Generator

import yaml

from smolagents import CodeAgent, LiteLLMModel, PlanningStep
from smolagents.agent_types import AgentAudio, AgentImage, AgentText
from smolagents.memory import ActionStep, FinalAnswerStep, MemoryStep
from smolagents.models import ChatMessageStreamDelta
from smolagents.utils import _is_package_available

from tools import search, scraper, FileReader, FileWriter, CommitChanges, get_repository_structure  # tools.py is expected in the same directory


def get_step_footnote_content(step_log: MemoryStep, step_name: str) -> str:
    """Build a footnote string for a step log with duration and token information."""
    step_footnote = f"**{step_name}**"
    if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
        step_footnote += (
            f" | Input tokens: {step_log.input_token_count:,}"
            f" | Output tokens: {step_log.output_token_count:,}"
        )
    # Append the duration only when it is set; duration may be None
    if getattr(step_log, "duration", None):
        step_footnote += f" | Duration: {round(float(step_log.duration), 2)}"
    return f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
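
# Example output (illustrative values):
#   "**Step 1** | Input tokens: 1,234 | Output tokens: 567 | Duration: 1.23"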


def _clean_model_output(model_output: str) -> str:
    """
    Clean up model output by removing trailing tags and extra backticks.

    Args:
        model_output (`str`): Raw model output.

    Returns:
        `str`: Cleaned model output.
    """
    if not model_output:
        return ""
    model_output = model_output.strip()
    # Remove any trailing <end_code> and extra backticks, handling multiple possible formats
    model_output = re.sub(r"```\s*<end_code>", "```", model_output)  # handles ```<end_code>
    model_output = re.sub(r"<end_code>\s*```", "```", model_output)  # handles <end_code>```
    model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)  # handles ```\n<end_code>
    return model_output.strip()
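
# For instance, 'print(1)\n```<end_code>' becomes 'print(1)\n```'.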


def _format_code_content(content: str) -> str:
    """
    Format code content as a Python code block if it is not already formatted.

    Args:
        content (`str`): Code content to format.

    Returns:
        `str`: Code content formatted as a Python code block.
    """
    content = content.strip()
    # Remove existing code block fences and end_code tags
    content = re.sub(r"```.*?\n", "", content)
    content = re.sub(r"\s*<end_code>\s*", "", content)
    content = content.strip()
    # Add Python code block formatting if not already present
    if not content.startswith("```python"):
        content = f"```python\n{content}\n```"
    return content
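
# For instance, 'x = 1' becomes '```python\nx = 1\n```'.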


def _process_action_step(step_log: ActionStep, skip_model_outputs: bool = False) -> Generator:
    """
    Process an [`ActionStep`] and yield appropriate Gradio ChatMessage objects.

    Args:
        step_log ([`ActionStep`]): ActionStep to process.
        skip_model_outputs (`bool`): Whether to skip model outputs.

    Yields:
        `gradio.ChatMessage`: Gradio ChatMessages representing the action step.
    """
    import gradio as gr

    # Output the step number
    step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else "Step"
    if not skip_model_outputs:
        yield gr.ChatMessage(role="assistant", content=f"**{step_number}**", metadata={"status": "done"})

    # First yield the thought/reasoning from the LLM
    if not skip_model_outputs and getattr(step_log, "model_output", ""):
        model_output = _clean_model_output(step_log.model_output)
        yield gr.ChatMessage(role="assistant", content=model_output, metadata={"status": "done"})

    # For tool calls, create a parent message
    if getattr(step_log, "tool_calls", []):
        first_tool_call = step_log.tool_calls[0]
        used_code = first_tool_call.name == "python_interpreter"

        # Process arguments based on type
        args = first_tool_call.arguments
        if isinstance(args, dict):
            content = str(args.get("answer", str(args)))
        else:
            content = str(args).strip()

        # Format code content if needed
        if used_code:
            content = _format_code_content(content)

        # Create the tool call message
        parent_message_tool = gr.ChatMessage(
            role="assistant",
            content=content,
            metadata={
                "title": f"🛠️ Used tool {first_tool_call.name}",
                "status": "done",
            },
        )
        yield parent_message_tool

    # Display execution logs if they exist
    if getattr(step_log, "observations", "") and step_log.observations.strip():
        log_content = re.sub(r"^Execution logs:\s*", "", step_log.observations.strip())
        yield gr.ChatMessage(
            role="assistant",
            content=f"```bash\n{log_content}\n```",
            metadata={"title": "📝 Execution Logs", "status": "done"},
        )

    # Display any images in observations
    if getattr(step_log, "observations_images", []):
        for image in step_log.observations_images:
            path_image = AgentImage(image).to_string()
            yield gr.ChatMessage(
                role="assistant",
                content={"path": path_image, "mime_type": f"image/{path_image.split('.')[-1]}"},
                metadata={"title": "🖼️ Output Image", "status": "done"},
            )

    # Handle errors
    if getattr(step_log, "error", None):
        yield gr.ChatMessage(
            role="assistant", content=str(step_log.error), metadata={"title": "💥 Error", "status": "done"}
        )

    # Add step footnote and separator
    yield gr.ChatMessage(
        role="assistant", content=get_step_footnote_content(step_log, step_number), metadata={"status": "done"}
    )
    yield gr.ChatMessage(role="assistant", content="-----", metadata={"status": "done"})
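
# Taken together, one action step renders as: step header, model reasoning,
# tool call, execution logs, output images, error (if any), footnote, separator.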


def _process_planning_step(step_log: PlanningStep, skip_model_outputs: bool = False) -> Generator:
    """
    Process a [`PlanningStep`] and yield appropriate gradio.ChatMessage objects.

    Args:
        step_log ([`PlanningStep`]): PlanningStep to process.
        skip_model_outputs (`bool`): Whether to skip model outputs.

    Yields:
        `gradio.ChatMessage`: Gradio ChatMessages representing the planning step.
    """
    import gradio as gr

    if not skip_model_outputs:
        yield gr.ChatMessage(role="assistant", content="**Planning step**", metadata={"status": "done"})
        yield gr.ChatMessage(role="assistant", content=step_log.plan, metadata={"status": "done"})
    yield gr.ChatMessage(
        role="assistant", content=get_step_footnote_content(step_log, "Planning step"), metadata={"status": "done"}
    )
    yield gr.ChatMessage(role="assistant", content="-----", metadata={"status": "done"})


def _process_final_answer_step(step_log: FinalAnswerStep) -> Generator:
    """
    Process a [`FinalAnswerStep`] and yield appropriate gradio.ChatMessage objects.

    Args:
        step_log ([`FinalAnswerStep`]): FinalAnswerStep to process.

    Yields:
        `gradio.ChatMessage`: Gradio ChatMessages representing the final answer.
    """
    import gradio as gr

    final_answer = step_log.final_answer
    if isinstance(final_answer, AgentText):
        yield gr.ChatMessage(
            role="assistant",
            content=f"**Final answer:**\n{final_answer.to_string()}\n",
            metadata={"status": "done"},
        )
    elif isinstance(final_answer, AgentImage):
        yield gr.ChatMessage(
            role="assistant",
            content={"path": final_answer.to_string(), "mime_type": "image/png"},
            metadata={"status": "done"},
        )
    elif isinstance(final_answer, AgentAudio):
        yield gr.ChatMessage(
            role="assistant",
            content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
            metadata={"status": "done"},
        )
    else:
        yield gr.ChatMessage(
            role="assistant", content=f"**Final answer:** {str(final_answer)}", metadata={"status": "done"}
        )


def pull_messages_from_step(step_log: MemoryStep, skip_model_outputs: bool = False):
    """Extract ChatMessage objects from agent steps with proper nesting.

    Args:
        step_log: The step log to display as gr.ChatMessage objects.
        skip_model_outputs: If True, skip the model outputs when creating the gr.ChatMessage objects.
            This is used, for instance, when streamed model outputs have already been displayed.
    """
    if not _is_package_available("gradio"):
        raise ModuleNotFoundError(
            "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
        )
    if isinstance(step_log, ActionStep):
        yield from _process_action_step(step_log, skip_model_outputs)
    elif isinstance(step_log, PlanningStep):
        yield from _process_planning_step(step_log, skip_model_outputs)
    elif isinstance(step_log, FinalAnswerStep):
        yield from _process_final_answer_step(step_log)
    else:
        raise ValueError(f"Unsupported step type: {type(step_log)}")


def stream_to_gradio(
    agent,
    task: str,
    task_images: list | None = None,
    reset_agent_memory: bool = False,
    additional_args: dict | None = None,
):
    """Run an agent with the given task and stream the agent's messages as gradio ChatMessages."""
    if not _is_package_available("gradio"):
        raise ModuleNotFoundError(
            "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
        )
    intermediate_text = ""
    for step_log in agent.run(
        task, images=task_images, stream=True, reset=reset_agent_memory, additional_args=additional_args
    ):
        # Track tokens if the model provides them
        if getattr(agent.model, "last_input_token_count", None) is not None:
            if isinstance(step_log, (ActionStep, PlanningStep)):
                step_log.input_token_count = agent.model.last_input_token_count
                step_log.output_token_count = agent.model.last_output_token_count

        if isinstance(step_log, MemoryStep):
            intermediate_text = ""
            for message in pull_messages_from_step(
                step_log,
                # If we're streaming model outputs, no need to display them twice
                skip_model_outputs=getattr(agent, "stream_outputs", False),
            ):
                yield message
        elif isinstance(step_log, ChatMessageStreamDelta):
            intermediate_text += step_log.content or ""
            yield intermediate_text
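
# Usage sketch (assumes `agent` is an already-initialized smolagents agent):
#   for msg in stream_to_gradio(agent, task="List the files in this Space"):
#       ...  # gr.ChatMessage objects, interleaved with partial-text strings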


class GradioUI:
    """A one-line interface to launch your agent in Gradio."""

    def __init__(self, agent_name: str = "Agent interface", agent_description: str | None = None):
        if not _is_package_available("gradio"):
            raise ModuleNotFoundError(
                "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
            )
        self.agent_name = agent_name
        self.agent_description = agent_description

    def _initialize_agents(self, space_id: str, temp_dir: str):
        """Initialize agents and tools for a given space_id and temporary directory."""
        # Initialize the LLM backend
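        # NOTE: LiteLLM resolves Gemini credentials from the environment (typically
        # the GEMINI_API_KEY variable); no key is passed explicitly here.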
        model = LiteLLMModel(model_id="gemini/gemini-2.0-flash")

        # Get the repository structure
        repo_structure = get_repository_structure(space_id=space_id)

        # Load prompt templates
        try:
            with open("planning_agent_prompt_templates.yaml", "r") as f:
                planning_agent_prompt_templates = yaml.safe_load(f)
            with open("swe_agent_prompt_templates.yaml", "r") as f:
                swe_agent_prompt_templates = yaml.safe_load(f)
        except FileNotFoundError as e:
            print(f"Error loading prompt templates: {e}")
            print(
                "Please ensure 'planning_agent_prompt_templates.yaml' and "
                "'swe_agent_prompt_templates.yaml' are in the same directory."
            )
            return None, None  # Indicate failure

        # Enhance both system prompts with the repository structure
        planning_agent_prompt_templates["system_prompt"] += "\n\n\n" + repo_structure + "\n\n"
        swe_agent_prompt_templates["system_prompt"] += "\n\n\n" + repo_structure + "\n\n"

        # Initialize tool instances scoped to the temporary working directory
        read_file = FileReader(space_id=space_id, folder_path=temp_dir)
        write_file = FileWriter(space_id=space_id, folder_path=temp_dir)
        commit_changes = CommitChanges(space_id=space_id, folder_path=temp_dir)

        # SWE agent: implements the technical work delegated by the planner
        swe_agent = CodeAgent(
            model=model,
            prompt_templates=swe_agent_prompt_templates,
            verbosity_level=1,
            tools=[search, scraper, read_file, write_file],
            name="swe_agent",
            description=(
                "An expert Software Engineer capable of designing, developing, and debugging code. "
                "This agent can read and write files, search the web, and scrape web content to assist "
                "in coding tasks. It excels at implementing detailed technical specifications provided "
                "by the Planning Agent."
            ),
        )

        # Planning agent: decomposes user requests and delegates to the SWE agent
        planning_agent = CodeAgent(
            model=model,
            prompt_templates=planning_agent_prompt_templates,
            verbosity_level=1,
            tools=[search, scraper, read_file, write_file, commit_changes],
            managed_agents=[swe_agent],
            name="planning_agent",
            description=(
                "A high-level planning agent responsible for breaking down complex user requests into "
                "actionable steps and delegating coding tasks to the Software Engineer Agent. It focuses "
                "on strategy, task decomposition, and coordinating the overall development process."
            ),
            stream_outputs=True,
        )
        return planning_agent, swe_agent

    def _update_space_id_and_agents(self, new_space_id: str, current_state: dict):
        """Handle a space_id change: clean up the old temp dir, create a new one, and re-initialize agents."""
        import gradio as gr

        old_temp_dir = current_state.get("temp_dir")
        if old_temp_dir and os.path.exists(old_temp_dir):
            print(f"Cleaning up old temporary directory: {old_temp_dir}")
            shutil.rmtree(old_temp_dir)

        if not new_space_id:
            current_state["space_id"] = None
            current_state["temp_dir"] = None
            current_state["agent"] = None
            current_state["file_upload_folder"] = None
            print("Space ID is empty. Agents are not initialized.")
            # Return empty lists (not None) for the message and upload logs so that
            # later len() calls on them keep working
            return new_space_id, [], [], None, gr.Textbox("Please enter a Space ID to initialize agents.", visible=True)

        # Create a new temporary workspace
        temp_dir = tempfile.mkdtemp(prefix=f"ai_workspace_{new_space_id.replace('/', '_')}_")
        file_upload_folder = Path(temp_dir) / "uploads"
        file_upload_folder.mkdir(parents=True, exist_ok=True)

        # Initialize agents
        planning_agent, _ = self._initialize_agents(new_space_id, temp_dir)
        if planning_agent is None:
            # Initialization failed: clean up the newly created temp dir
            shutil.rmtree(temp_dir)
            current_state["space_id"] = None
            current_state["temp_dir"] = None
            current_state["agent"] = None
            current_state["file_upload_folder"] = None
            return new_space_id, [], [], None, gr.Textbox("Failed to initialize agents. Check console for errors.", visible=True)

        # Update session state
        current_state["space_id"] = new_space_id
        current_state["temp_dir"] = temp_dir
        current_state["agent"] = planning_agent
        current_state["file_upload_folder"] = file_upload_folder
        print(f"Initialized agents for Space ID: {new_space_id} in {temp_dir}")
        return new_space_id, [], [], file_upload_folder, gr.Textbox(f"Agents initialized for Space ID: {new_space_id}", visible=True)

    def interact_with_agent(self, prompt, messages, session_state):
        import gradio as gr

        agent = session_state.get("agent")
        if agent is None:
            messages.append(
                gr.ChatMessage(role="assistant", content="Please enter a Space ID and initialize the agents first.")
            )
            yield messages
            return
        try:
            messages.append(gr.ChatMessage(role="user", content=prompt, metadata={"status": "done"}))
            yield messages
            for msg in stream_to_gradio(agent, task=prompt, reset_agent_memory=False):
                if isinstance(msg, gr.ChatMessage):
                    messages.append(msg)
                elif isinstance(msg, str):  # A streamed text delta: update the pending message in place
                    if messages and messages[-1].metadata.get("status") == "pending":
                        messages[-1].content = msg
                    else:
                        messages.append(
                            gr.ChatMessage(role="assistant", content=msg, metadata={"status": "pending"})
                        )
                yield messages
            yield messages
        except Exception as e:
            print(f"Error in interaction: {str(e)}")
            messages.append(gr.ChatMessage(role="assistant", content=f"Error: {str(e)}"))
            yield messages

    def upload_file(self, file, file_uploads_log, session_state, allowed_file_types=None):
        """
        Handle file uploads; the default allowed types are .pdf, .docx, and .txt.
        """
        import gradio as gr

        file_upload_folder = session_state.get("file_upload_folder")
        if file_upload_folder is None:
            return gr.Textbox("Please enter a Space ID and initialize agents before uploading files.", visible=True), file_uploads_log
        if file is None:
            return gr.Textbox(value="No file uploaded", visible=True), file_uploads_log
        if allowed_file_types is None:
            allowed_file_types = [".pdf", ".docx", ".txt"]

        file_ext = os.path.splitext(file.name)[1].lower()
        if file_ext not in allowed_file_types:
            return gr.Textbox("File type disallowed", visible=True), file_uploads_log

        # Sanitize the file name: replace any character that is not alphanumeric,
        # an underscore, a dash, or a dot with an underscore
        original_name = os.path.basename(file.name)
        sanitized_name = re.sub(r"[^\w\-.]", "_", original_name)
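        # e.g. "my report (v2).pdf" -> "my_report__v2_.pdf"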

        # Save the uploaded file to the session's upload folder
        file_path = os.path.join(file_upload_folder, sanitized_name)
        shutil.copy(file.name, file_path)
        return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]

    def log_user_message(self, text_input, file_uploads_log):
        import gradio as gr

        return (
            text_input
            + (
                f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"
                if len(file_uploads_log) > 0
                else ""
            ),
            "",
            gr.Button(interactive=False),
        )

    def launch(self, share: bool = True, **kwargs):
        self.create_app().launch(debug=True, share=share, **kwargs)

    def create_app(self):
        import gradio as gr

        with gr.Blocks(theme="ocean", fill_height=True) as demo:
            # Session-specific state
            session_state = gr.State({})
            stored_messages = gr.State([])
            file_uploads_log = gr.State([])
            space_id_state = gr.State(None)  # Holds the current space_id
            file_upload_folder_state = gr.State(None)  # Holds the current file upload folder

            with gr.Sidebar():
                gr.Markdown(
                    f"# {self.agent_name.replace('_', ' ').capitalize()}"
                    "\n> This web UI allows you to interact with a `smolagents` agent that can use tools "
                    "and execute steps to complete tasks."
                    + (f"\n\n**Agent description:**\n{self.agent_description}" if self.agent_description else "")
                )

                # Space ID input
                space_id_input = gr.Textbox(
                    label="Hugging Face Space ID",
                    placeholder="Enter your Space ID (e.g., username/space-name)",
                    interactive=True,
                )
                initialization_status = gr.Textbox(label="Initialization Status", interactive=False, visible=True)

                # Re-initialize the agents whenever the space_id changes
                space_id_input.change(
                    self._update_space_id_and_agents,
                    [space_id_input, session_state],
                    [space_id_state, stored_messages, file_uploads_log, file_upload_folder_state, initialization_status],
                )

                with gr.Group():
                    gr.Markdown("**Your request**", container=True)
                    text_input = gr.Textbox(
                        lines=3,
                        label="Chat Message",
                        container=False,
                        placeholder="Enter your prompt here and press Shift+Enter or press the button",
                    )
                    submit_btn = gr.Button("Submit", variant="primary")

                gr.HTML(
                    "<br><br><h4><center>Powered by <a target='_blank' href='https://github.com/huggingface/smolagents'><b>smolagents</b></a></center></h4>"
                )

            # Main chat interface
            chatbot = gr.Chatbot(
                label="Agent",
                type="messages",
                avatar_images=(
                    None,
                    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
                ),
                resizeable=True,
                scale=1,
            )

            # Set up event handlers: log the message, run the agent, then re-enable the inputs
            text_input.submit(
                self.log_user_message,
                [text_input, file_uploads_log],
                [stored_messages, text_input, submit_btn],
            ).then(self.interact_with_agent, [stored_messages, chatbot, session_state], [chatbot]).then(
                lambda: (
                    gr.Textbox(
                        interactive=True, placeholder="Enter your prompt here and press Shift+Enter or the button"
                    ),
                    gr.Button(interactive=True),
                ),
                None,
                [text_input, submit_btn],
            )
            submit_btn.click(
                self.log_user_message,
                [text_input, file_uploads_log],
                [stored_messages, text_input, submit_btn],
            ).then(self.interact_with_agent, [stored_messages, chatbot, session_state], [chatbot]).then(
                lambda: (
                    gr.Textbox(
                        interactive=True, placeholder="Enter your prompt here and press Shift+Enter or the button"
                    ),
                    gr.Button(interactive=True),
                ),
                None,
                [text_input, submit_btn],
            )
        return demo


def run_gradio_agent():
    """Run the Gradio UI for the agent."""
    # The GradioUI class handles space_id input and agent initialization internally
    GradioUI(
        agent_name="SmolAgents Gradio Interface",
        agent_description="Interact with a SmolAgents planning agent capable of code generation and execution.",
    ).launch()


if __name__ == "__main__":
    # The script directly launches the Gradio UI
    run_gradio_agent()
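
# Usage sketch (assumes this file is saved as app.py, with tools.py and both
# *_prompt_templates.yaml files alongside, and Gemini credentials in the environment):
#   $ python app.py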