import os

import gradio as gr
import pandas as pd
import requests

# CodeAgent, DuckDuckGoSearchTool and InferenceClientModel are all exported
# from the top-level smolagents package (DuckDuckGoSearchTool is not in
# smolagents.tools, so the previous import path would fail).
from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel


# --- Basic agent using flan-t5-large + DuckDuckGo search ---
class BasicAgent:
    def __init__(self):
        model = InferenceClientModel(model_id="google/flan-t5-large")
        tools = [DuckDuckGoSearchTool()]
        self.agent = CodeAgent(model=model, tools=tools, add_base_tools=False, max_steps=5)
        print("🤖 Agent initialized.")

    def __call__(self, question: str) -> str:
        print(f"❓ Question received: {question[:50]}...")
        try:
            # agent.run() may return a non-string final answer, so cast before stripping.
            answer = str(self.agent.run(question)).strip()
            print(f"✅ Answer generated: {answer}")
            return answer
        except Exception as e:
            print(f"❌ Agent error: {e}")
            return f"AGENT ERROR: {e}"


# --- GAIA evaluation and submission ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"


def run_and_submit_all(profile: gr.OAuthProfile | None):
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = profile.username
        print(f"👤 User: {username}")
    else:
        return "⚠️ Please log in to Hugging Face before submitting.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"

    try:
        agent = BasicAgent()
    except Exception as e:
        return f"❌ Error creating the agent: {e}", None

    # Fetch the questions
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
    except Exception as e:
        return f"❌ Error fetching questions: {e}", None

    # Answer each question
    results_log = []
    answers_payload = []
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Submitted Answer": submitted_answer
            })
        except Exception as e:
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Submitted Answer": f"AGENT ERROR: {e}"
            })

    # Submit the answers
    submission_data = {
        "username": username,
        "agent_code": agent_code,
        "answers": answers_payload
    }
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"✅ Submission successful!\n"
            f"👤 User: {result_data.get('username')}\n"
            f"📊 Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"📬 Message: {result_data.get('message', 'No message.')}"
        )
        return final_status, pd.DataFrame(results_log)
    except Exception as e:
        return f"❌ Error during submission: {e}", pd.DataFrame(results_log)


# --- Gradio interface ---
with gr.Blocks() as demo:
    gr.Markdown("# 🧠 GAIA Agent Evaluator - Hugging Face Course")
    gr.Markdown("""
    1. Log in to Hugging Face.
    2. Press the button to run your agent and submit its answers.
    3. Wait a few minutes and check the score.
    """)

    gr.LoginButton()
    run_button = gr.Button("▶️ Run and Submit Answers")

    status_output = gr.Textbox(label="Result", lines=6, interactive=False)
    results_table = gr.DataFrame(label="Generated Answers")

    # Gradio injects the gr.OAuthProfile argument automatically when the
    # handler declares it in its signature, so no explicit inputs are needed.
    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])


if __name__ == "__main__":
    print("🚀 Launching interface...")
    demo.launch(debug=True)