import asyncio
import os

import gradio as gr
from dotenv import load_dotenv

# Install the iLearn package at runtime (Hugging Face Spaces pattern) before importing it.
os.system("pip install git+https://huggingface.co/spaces/Agents-MCP-Hackathon/iLearn.git")
from ilearn_core.agent import iLearnAgent

# Load environment variables for local development
load_dotenv()


def initialize_agent():
    """
    Called once when the Gradio app loads to create a single agent instance.
    """
    return iLearnAgent(
        provider_name=os.getenv("TOOL_DECISION_PROVIDER", "groq"),
        model_display_name="Llama 3 8B (Groq)",
    )


async def handle_chat_submit(user_message: str, history: list, agent_state: iLearnAgent):
    """
    Handles the chat interaction, streaming the agent's response to the UI.
    """
    # Append the new user turn with an empty assistant slot, then stream tokens into it.
    history.append([user_message, ""])
    yield history

    full_response = ""
    async for chunk in agent_state.chat(user_message):
        full_response += chunk
        history[-1][1] = full_response
        yield history


with gr.Blocks(theme=gr.themes.Soft(), css="footer {display: none !important}") as demo:
    gr.Markdown("# 🤖 iLearn: The Autonomous Learning Agent")

    # Holds the per-session agent instance created by demo.load().
    agent_state = gr.State()

    chatbot = gr.Chatbot(
        label="Conversation",
        bubble_full_width=False,
        height=600,
        render_markdown=True,
    )

    with gr.Row():
        msg_textbox = gr.Textbox(
            show_label=False,
            placeholder="Ask a question...",
            scale=7,
            autofocus=True,
            container=False,
        )
        submit_btn = gr.Button("Send", variant="primary", scale=1, min_width=150)

    demo.load(
        fn=initialize_agent,
        inputs=None,
        outputs=[agent_state],
        show_progress="hidden",
    )

    # Both pressing Enter and clicking "Send" trigger the same streaming handler.
    submit_action = msg_textbox.submit(
        fn=handle_chat_submit,
        inputs=[msg_textbox, chatbot, agent_state],
        outputs=[chatbot],
    )
    click_action = submit_btn.click(
        fn=handle_chat_submit,
        inputs=[msg_textbox, chatbot, agent_state],
        outputs=[chatbot],
    )

    # Clear the textbox after either trigger finishes.
    submit_action.then(
        fn=lambda: gr.update(value=""),
        inputs=None,
        outputs=[msg_textbox],
        queue=False,
    )
    click_action.then(
        fn=lambda: gr.update(value=""),
        inputs=None,
        outputs=[msg_textbox],
        queue=False,
    )


if __name__ == "__main__":
    demo.queue().launch()