Tbaberca committed
Commit ad9e788 · verified · 1 Parent(s): b85ebdb

Update Gradio_UI.py

Files changed (1)
  1. Gradio_UI.py +21 -132
Gradio_UI.py CHANGED
@@ -1,135 +1,3 @@
-#!/usr/bin/env python
-# coding=utf-8
-# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mimetypes
-import os
-import re
-from typing import Optional
-
-from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
-from smolagents.agents import ActionStep
-from smolagents.memory import MemoryStep
-from smolagents.utils import _is_package_available
-
-
-def pull_messages_from_step(step_log: MemoryStep):
-    import gradio as gr
-
-    if isinstance(step_log, ActionStep):
-        step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
-        yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
-
-        if hasattr(step_log, "model_output") and step_log.model_output is not None:
-            model_output = step_log.model_output.strip()
-            model_output = re.sub(r"```\s*<end_code>", "```", model_output)
-            model_output = re.sub(r"<end_code>\s*```", "```", model_output)
-            model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)
-            model_output = model_output.strip()
-            yield gr.ChatMessage(role="assistant", content=model_output)
-
-        if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
-            first_tool_call = step_log.tool_calls[0]
-            used_code = first_tool_call.name == "python_interpreter"
-            parent_id = f"call_{len(step_log.tool_calls)}"
-            args = first_tool_call.arguments
-            content = str(args.get("answer", str(args))) if isinstance(args, dict) else str(args).strip()
-
-            if used_code:
-                content = re.sub(r"```.*?\n", "", content)
-                content = re.sub(r"\s*<end_code>\s*", "", content).strip()
-                if not content.startswith("```python"):
-                    content = f"```python\n{content}\n```"
-
-            parent_message_tool = gr.ChatMessage(
-                role="assistant",
-                content=content,
-                metadata={"title": f"🛠️ Used tool {first_tool_call.name}", "id": parent_id, "status": "pending"},
-            )
-            yield parent_message_tool
-
-            if hasattr(step_log, "observations") and step_log.observations and step_log.observations.strip():
-                log_content = step_log.observations.strip()
-                log_content = re.sub(r"^Execution logs:\s*", "", log_content)
-                yield gr.ChatMessage(
-                    role="assistant",
-                    content=log_content,
-                    metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
-                )
-
-            if hasattr(step_log, "error") and step_log.error is not None:
-                yield gr.ChatMessage(
-                    role="assistant",
-                    content=str(step_log.error),
-                    metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
-                )
-
-            parent_message_tool.metadata["status"] = "done"
-
-        elif hasattr(step_log, "error") and step_log.error is not None:
-            yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
-
-        step_footnote = f"{step_number}"
-        if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
-            token_str = (
-                f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
-            )
-            step_footnote += token_str
-        if hasattr(step_log, "duration"):
-            duration = round(float(step_log.duration), 2)
-            step_footnote += f" | Duration: {duration}"
-
-        step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
-        yield gr.ChatMessage(role="assistant", content=step_footnote)
-        yield gr.ChatMessage(role="assistant", content="-----")
-
-
-def stream_to_gradio(agent, task: str, reset_agent_memory: bool = False, additional_args: Optional[dict] = None):
-    if not _is_package_available("gradio"):
-        raise ModuleNotFoundError(
-            "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
-        )
-
-    import gradio as gr
-    total_input_tokens = 0
-    total_output_tokens = 0
-
-    for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
-        if hasattr(agent.model, "last_input_token_count"):
-            total_input_tokens += agent.model.last_input_token_count
-            total_output_tokens += agent.model.last_output_token_count
-            if isinstance(step_log, ActionStep):
-                step_log.input_token_count = agent.model.last_input_token_count
-                step_log.output_token_count = agent.model.last_output_token_count
-
-        for message in pull_messages_from_step(step_log):
-            yield message
-
-    final_answer = handle_agent_output_types(step_log)
-
-    import gradio as gr
-    if isinstance(final_answer, AgentText):
-        yield gr.ChatMessage(role="assistant", content=f"**Final answer:**\n{final_answer.to_string()}\n")
-    elif isinstance(final_answer, AgentImage):
-        yield gr.ChatMessage(role="assistant", content={"path": final_answer.to_string(), "mime_type": "image/png"})
-    elif isinstance(final_answer, AgentAudio):
-        yield gr.ChatMessage(role="assistant", content={"path": final_answer.to_string(), "mime_type": "audio/wav"})
-    else:
-        yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
-
-
 class GradioUI:
     def __init__(self, agent, file_upload_folder: str = "./uploads"):
         self.agent = agent
@@ -189,3 +57,24 @@ class GradioUI:
 
         file_uploads_log.append(file_path)
         return gr.Textbox(f"File uploaded: {sanitized_name}", visible=True), file_uploads_log
+
+    def build_ui(self):
+        import gradio as gr
+
+        chatbot = gr.Chatbot()
+        msg = gr.Textbox(placeholder="Ask something...", label="Your message")
+        clear = gr.Button("Clear")
+        file_uploads_log = []
+
+        with gr.Blocks() as demo:
+            with gr.Row():
+                with gr.Column():
+                    chatbot.render()
+                    msg.render()
+                    clear.render()
+
+            msg.submit(self.interact_with_agent, [msg, chatbot], chatbot)
+            clear.click(lambda: None, None, chatbot, queue=False)
+
+        return demo
+
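
For reference, a minimal sketch of how the build_ui method added above could be used from a Space entry point. This is illustrative only and not part of the commit: the agent setup (CodeAgent, HfApiModel) is a placeholder, and the interact_with_agent handler wired in build_ui is assumed to be defined elsewhere in the GradioUI class.

    # hypothetical app.py — assumptions, not part of this diff
    from smolagents import CodeAgent, HfApiModel
    from Gradio_UI import GradioUI

    agent = CodeAgent(tools=[], model=HfApiModel())  # placeholder agent configuration
    demo = GradioUI(agent).build_ui()  # build_ui() returns the gr.Blocks defined above
    demo.launch()  # serve the chat interface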