Update app.py
app.py (CHANGED)
@@ -4,24 +4,28 @@ import gradio as gr
 import os
 import subprocess
 from langchain_mistralai.chat_models import ChatMistralAI
-from langchain_core.messages import HumanMessage
+from langchain_core.messages import HumanMessage, AIMessage
 from langchain.agents import AgentExecutor, create_tool_calling_agent
 from langchain_core.prompts import ChatPromptTemplate
 from langchain.tools import tool
 
 # --- 1. Environment setup and LLM initialization ---
 API_KEY = os.environ.get("MISTRAL_API_KEY", None)
-…
+llm = None
+if API_KEY:
+    try:
+        llm = ChatMistralAI(model="codestral-latest", temperature=0, api_key=API_KEY, streaming=True)
+    except Exception as e:
+        print(f"Failed to initialize LLM: {e}")
 else:
-…
+    print("FATAL: MISTRAL_API_KEY is not set in Space Secrets.")
 
-# --- 2. Core tools
+# --- 2. Core tool definitions ---
+# @tool-decorated functions that the LangChain agent uses internally
 @tool
 def generate_c_code(description: str) -> str:
     """Generates complete, compilable C code based on a natural language description. Use this when the user wants to create new code."""
+    if not llm: return "Error: LLM not initialized."
     code_generation_prompt = f"""You are a world-class C programming expert.
 Generate a single, complete, and compilable C code file based on this request: '{description}'.
 The code must be clean, efficient, and include necessary headers.

@@ -38,26 +42,21 @@ def compile_and_run_c_code(code: str) -> str:
     try:
         with open("main.c", "w", encoding='utf-8') as f:
             f.write(code)
-
         compile_proc = subprocess.run(
             ["gcc", "main.c", "-o", "main.out", "-lm", "-w"],
             capture_output=True, text=True, timeout=15
         )
         if compile_proc.returncode != 0:
             return f"--- COMPILATION FAILED ---\n{compile_proc.stderr}"
-
         run_proc = subprocess.run(
-            ["./main.out"],
-            capture_output=True, text=True, timeout=15
+            ["./main.out"], capture_output=True, text=True, timeout=15
         )
         if run_proc.returncode != 0:
             return f"--- RUNTIME ERROR ---\n{run_proc.stderr}"
-
         output = run_proc.stdout
         if not output.strip():
             return "--- EXECUTION SUCCEEDED ---\n(No output was produced)"
         return f"--- EXECUTION SUCCEEDED ---\n{output}"
-
     except subprocess.TimeoutExpired:
         return "--- ERROR ---\nProcess timed out. Check for infinite loops."
     except Exception as e:

@@ -66,11 +65,10 @@ def compile_and_run_c_code(code: str) -> str:
 @tool
 def refactor_or_analyze_c_code(code: str, request: str) -> str:
     """Analyzes, refactors, or explains a given C code based on a user's specific request. Use this for code improvement or understanding."""
+    if not llm: return "Error: LLM not initialized."
     analysis_prompt = f"""You are a senior C code reviewer. Analyze the following C code based on the user's request.
 Provide a clear, concise, and helpful response. If refactoring, provide the complete improved code in a C code block.
-
 User's Request: '{request}'
-
 C Code to Analyze:
 ```c
 {code}

@@ -78,82 +76,66 @@ C Code to Analyze:
     response = llm.invoke([HumanMessage(content=analysis_prompt)])
     return response.content
 
-# --- 3.
-…
+# --- 3. Create the MCP tool APIs ---
+# Turn each tool into an API with gr.Interface; these are what external MCP clients see.
+generate_api = gr.Interface(fn=generate_c_code, inputs="text", outputs="code", title="C Code Generator", description="Generates C code from a description.")
+compile_api = gr.Interface(fn=compile_and_run_c_code, inputs="code", outputs="text", title="C Code Compiler & Runner", description="Compiles and runs C code.")
+refactor_api = gr.Interface(fn=refactor_or_analyze_c_code, inputs=["code", "text"], outputs="markdown", title="C Code Refactor & Analyzer", description="Refactors or analyzes C code.")
 
-…
+# --- 4. LangChain agent setup ---
+tools = [generate_c_code, compile_and_run_c_code, refactor_or_analyze_c_code]
+agent_executor = None
+if llm:
+    agent_prompt = ChatPromptTemplate.from_messages([
         ("system", "You are a powerful C-language assistant agent. You can generate, compile, and analyze C code. Think step-by-step and use your tools to fulfill the user's request. If you generate code, you should almost always offer to compile and run it for the user as the next step."),
         ("placeholder", "{chat_history}"),
         ("human", "{input}"),
         ("placeholder", "{agent_scratchpad}"),
-    ]
-    )
-
-if llm:
+    ])
     agent = create_tool_calling_agent(llm, tools, agent_prompt)
-    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
-else:
-    agent_executor = None
+    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True)
 
 def agent_chat(user_input, history):
     if agent_executor is None:
         yield "Error: MISTRAL_API_KEY is not configured. Please set it in the Space secrets."
         return
-
     chat_history = []
     for human, ai in history:
         chat_history.append(HumanMessage(content=human))
         chat_history.append(AIMessage(content=ai))
-
-    response_stream = agent_executor.stream({
-        "input": user_input,
-        "chat_history": chat_history
-    })
-
+    response_stream = agent_executor.stream({"input": user_input, "chat_history": chat_history})
     full_response = ""
     for chunk in response_stream:
         if "output" in chunk:
             full_response += chunk["output"]
             yield full_response
 
-# ---
+# --- 5. Gradio UI/UX design ---
 with gr.Blocks(theme=gr.themes.Monochrome(), css=".gradio-container{max-width: 800px !important; margin: auto;}") as demo:
     gr.Markdown("# 🚀 C-Codestral Agent")
-…
-        [
-            "Generate a C program to calculate the factorial of a number.",
-            "Now, compile and run the code you just created.",
-            "Refactor the factorial code to use recursion instead of a loop.",
-        ],
-        inputs=txt,
-        label="Example Prompts"
-    )
-
-# --- 5. Expose MCP tools ---
-    gr.load_tools(tools)
+    with gr.Tabs():
+        with gr.TabItem("🤖 Agent Chat"):
+            chatbot = gr.Chatbot(label="C-Agent", bubble_full_width=False, height=600, avatar_images=(None, "https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo-with-title.png"))
+            with gr.Row():
+                txt = gr.Textbox(show_label=False, placeholder="Enter your request and press enter...", container=False, scale=7)
+                submit_btn = gr.Button("Submit", variant="primary", scale=1)
+            txt.submit(agent_chat, [txt, chatbot], chatbot)
+            submit_btn.click(agent_chat, [txt, chatbot], chatbot)
+            txt.submit(lambda: "", None, txt)
+            submit_btn.click(lambda: "", None, txt)
+            gr.Examples(
+                ["Generate a C program to calculate the factorial of a number.", "Now, compile and run the code you just created.", "Refactor the factorial code to use recursion instead of a loop."],
+                inputs=txt, label="Example Prompts"
+            )
+        # Expose the MCP tools as APIs in a separate tab
+        with gr.TabItem("🛠️ MCP Tools API"):
+            gr.Markdown("## Available MCP Tools\nThese APIs can be used by any MCP-compliant client.")
+            with gr.Accordion("Tool: Generate C Code", open=False):
+                generate_api.render()
+            with gr.Accordion("Tool: Compile & Run C Code", open=False):
+                compile_api.render()
+            with gr.Accordion("Tool: Refactor & Analyze C Code", open=False):
+                refactor_api.render()
 
 if __name__ == "__main__":
     demo.queue().launch(debug=True)
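Because the new layout renders each tool as a gr.Interface inside the Space, every tool also becomes a named API endpoint that a remote client can call with the gradio_client package. The sketch below is a minimal illustration and is not part of the commit: the Space ID and the api_name values are assumptions, so the real endpoint names should be listed with client.view_api() first.

```python
# Minimal sketch (not from the commit): calling one of the exposed tool endpoints remotely.
# The Space ID and api_name below are assumptions; check the real endpoints with view_api().
from gradio_client import Client

client = Client("your-username/c-codestral-agent")  # hypothetical Space ID
client.view_api()  # prints the endpoints exposed by the rendered Interfaces

# Hypothetical call to the compile-and-run tool endpoint.
result = client.predict(
    '#include <stdio.h>\nint main(void) { printf("hello\\n"); return 0; }',
    api_name="/predict_1",  # assumed name; confirm via view_api()
)
print(result)
```

Whether these endpoints are additionally surfaced as MCP tools depends on the Gradio version and launch configuration, so treat this purely as a plain Gradio API call example.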