# Python's OS interface for accessing environment variables
import os
# Introspection utilities, so you can auto-wrap plain functions as tools later.
import inspect
# HTTP client - makes REST calls to the scoring endpoints
import requests
# Parses CSV/Excel files
import pandas as pd
# Gradio - provides the web front-end you see in the Space: text boxes, logs, the "Run Agent" button, etc.
import gradio as gr
# smolagents - minimalist agent framework for LLMs with tools
# CodeAgent - orchestrates the ReAct loop and logs each step
# Tool - a base class (there is also a @tool decorator)
# InferenceClientModel - wrapper for HF's Serverless Inference API so you don't need to stand up your own TGI/LLM endpoint
from smolagents import CodeAgent, DuckDuckGoSearchTool, Tool, InferenceClientModel
# Programmatic huggingface-cli login, so the app can pull private models, call paid-tier inference, and push artefacts
from huggingface_hub import login
# Quick helper to pull LangChain's built-in tools so you can blend them with smolagents tools if you wish
# (a commented-out sketch follows below).
from langchain.agents import load_tools
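# Illustrative only - a minimal sketch (kept commented out) of how a LangChain tool could be blended in.
# It assumes the `wikipedia` package is installed and that your smolagents version exposes
# `Tool.from_langchain`; adapt or drop it as needed.
#
# langchain_wikipedia = load_tools(["wikipedia"])[0]          # a LangChain-native tool
# wikipedia_tool = Tool.from_langchain(langchain_wikipedia)   # wrapped so a smolagents CodeAgent can call it
# # ...later: CodeAgent(model=..., tools=[wikipedia_tool, ...])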
# Configuration constant
# Unit-4 scoring micro-service where your agent submits answers and receives a JSON score.
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
# --- Basic Agent Definition ---
# ---- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ----
# This class is a ready-to-run wrapper that:
# 1. Authenticates to the Hub.
# 2. Spins up a server-side Qwen2.5-Coder-32B LLM.
# 3. Gives it a DuckDuckGo search plug-in plus smolagents' standard tool library.
# 4. Primes it with strict grading instructions.
# 5. Exposes a clean, callable interface for whatever front-end (Gradio, FastAPI, etc.) you bolt on.
class BasicAgent:
    def __init__(self):
        # Pull an HF access token from the Space's secrets or your local shell.
        # With it the app can download private models, call paid-tier Inference endpoints, and push artefacts.
        hf_token = os.getenv("HUGGINGFACE_HUB_TOKEN") or os.getenv("HF_TOKEN")
        # If a token is available, log in to the HF Hub with it.
        if hf_token:
            login(token=hf_token)
        else:
            try:
                login()
            except Exception as e:
                raise Exception(
                    # helpful, course-style message
                    "Authentication failed. Please either:\n"
                    "1. Run 'huggingface-cli login' in your terminal, or\n"
                    "2. Set the HUGGINGFACE_HUB_TOKEN environment variable with your token, or\n"
                    "3. Get a token from https://huggingface.co/settings/tokens"
                ) from e
        # Wraps the serverless Inference endpoint for the chosen model.
        # Initialize the model.
        # InferenceClientModel handles throttling, batching, and streaming under the hood.
        self.model = InferenceClientModel("Qwen/Qwen2.5-Coder-32B-Instruct")
        # Add a first tool.
        # Initialize the search tool.
        # DuckDuckGoSearchTool gives the agent web-search super-powers: it can pull fresh facts during its reasoning loop.
        self.search_tool = DuckDuckGoSearchTool()
        # smolagents' flagship class:
        # CodeAgent follows a ReAct-style loop - it literally writes Python code, executes it in a sandbox,
        # inspects the result, then decides its next step.
        self.agent = CodeAgent(
            model=self.model,
            tools=[self.search_tool],
            # Drops in a small standard tool library (web search, webpage visiting, etc.)
            # so you can solve many tasks without defining anything else.
            add_base_tools=True,
        )
        # Send a single "bootstrap" run whose only job is to lock in the behaviour rules.
        # The returned text is captured in self.response.
        self.response = self.agent.run(
            """
            You are a general AI assistant.
            I will ask you a question. Report your thoughts, and finish your answer with the following template: [FINAL ANSWER].
            YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
            If you are asked for a number, do not use commas to write your number, and do not use units such as $ or percent signs unless specified otherwise.
            If you are asked for a string, do not use articles or abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise.
            If you are asked for a comma separated list, apply the above rules depending on whether each element of the list is a number or a string.
            You have access to the following tools:
            Tool Name: search_tool, description: lets you search and browse the internet to access the most up-to-date information out there.
            If you require more tools to get a correct answer, create your own tools to use.
            """)
    # Turning BasicAgent into a callable object means you can drop it straight into Gradio
    # (or any other framework) without wrapping it in a standalone function.
    # Debug prints show the round-trip in the server logs.
    def __call__(self, question: str) -> str:
        print(f"Agent received question: {question[:50]}...")
        response = self.agent.run(question)
        # The reply is generated on the fly, not hard-coded.
        print(f"Agent returning answer: {response}")
        return response
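# A minimal sketch of the kind of extra tool the system prompt invites you to create.
# It is illustrative only and is not wired into BasicAgent above: the name and behaviour are
# assumptions, and you would pass an instance via `tools=[...]` if you wanted the agent to use it.
from smolagents import tool

@tool
def fetch_webpage_text(url: str) -> str:
    """Fetches a web page and returns the first 2000 characters of its raw text.

    Args:
        url: The full URL of the page to fetch (including http:// or https://).
    """
    page = requests.get(url, timeout=15)
    page.raise_for_status()
    return page.text[:2000]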
# 1. Checks if the user is logged in.
# 2. Downloads the questions from the grading API.
# 3. Uses the BasicAgent to generate answers.
# 4. Submits those answers back to the API.
# 5. Returns the grading results plus a full log for UI display (e.g. a Gradio table).
# Includes detailed logging, robust error handling, and submission payload formatting.
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers, and displays the results.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    # Authenticate the user and gather runtime info.
    # Grabbing SPACE_ID from the environment lets the app dynamically construct a URL to your codebase.
    # This is included in the submission for transparency (important in peer-review courses).
    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending a link to the code
    # If the Gradio OAuth profile object is present, extract the username.
    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    # Otherwise, exit early with a friendly error message.
    else:
        print("User not logged in.")
        return "Please log in to Hugging Face with the button.", None
    # --- Prepare API endpoints ---
    # Uses the provided scoring endpoint (defaulting to the course's hosted backend).
    # Constructs two URLs:
    api_url = DEFAULT_API_URL
    # URL to fetch the question bank.
    questions_url = f"{api_url}/questions"
    # URL to POST answers for grading.
    submit_url = f"{api_url}/submit"
    # 1. Instantiate the agent (modify this part to create your agent).
    # Tries to spin up the BasicAgent class from earlier.
    # Includes token validation, model loading, tool setup, and system prompt injection.
    # If this fails, the app exits gracefully, returning a user-visible error.
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # When the app runs as an HF Space, this link points to your codebase
    # (useful for others, so please keep it public).
    # Builds a link to your code repo on the HF Hub (public Space).
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    # Gets submitted with the answers for transparency.
    print(agent_code)
    # 2. Fetch Questions
    # --- FETCH QUESTIONS FROM THE BACKEND ---
    print(f"Fetching questions from: {questions_url}")
    # Tries to GET the questions from the course's scoring server.
    try:
        # Timeout and error handling ensure the app does not hang or crash.
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        # Handles edge cases like an empty response, malformed JSON, and network errors.
        # Empty response handling:
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None
    # 3. Run your agent.
    # Loop through the questions and generate answers.
    results_log = []  # Used to build a DataFrame for UI display (question + answer)
    answers_payload = []  # Sent to the grading API in the final submission
    # Loop through each question:
    for item in questions_data:
        # Extract the task_id.
        task_id = item.get("task_id")
        # Extract the question.
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        # Use your agent (__call__) to answer the question.
        # Log both the result and the metadata.
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        # On failure (bad formatting, model error, etc.), log an error message in the results.
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
    # 4. Prepare the submission.
    # A JSON-safe dict with everything the backend expects: the username (from login),
    # a code link (for peer review / reproducibility), and all answers in the required format.
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)
    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        # Submit the payload to the grading server.
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        # If successful:
        result_data = response.json()
        # Parse the final score.
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            # Final score
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            # Number of correct answers
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            # Backend message
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        # Return a user-friendly summary string and a pandas DataFrame to display in Gradio.
        return final_status, results_df
    # Handles possible errors.
    # Catches and logs:
    # - HTTP errors
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        # Unexpected server responses
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    # - Timeout errors
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    # - Network issues and anything else unexpected
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        # Ensure the return is still clean, with a DataFrame of what happened so far.
        return status_message, results_df
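# Optional local smoke test, illustrative only and never called by the app: it exercises the
# callable BasicAgent on one hard-coded question without touching the scoring API.
# It assumes a valid HF token is available in your environment.
def _local_smoke_test():
    # Instantiate the agent and print its answer to a single throwaway question.
    agent = BasicAgent()
    print(agent("What is the capital of France?"))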
# --- Build the Gradio interface using Blocks ---
# Layout-based API
with gr.Blocks() as demo:
    # Display the title.
    gr.Markdown("# Basic Agent Evaluation Runner")
    # Display the instructions.
    gr.Markdown(
        """
        **Instructions:**
        1. Please clone this Space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
        2. Log in to your Hugging Face account using the button below. This uses your HF username for the submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch the questions, run your agent, submit the answers, and see the score.
        ---
        **Disclaimers:**
        Once you click the "submit" button, it can take quite some time (this is the time the agent needs to go through all the questions).
        This Space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to address the delay on the submit button, you could cache the answers and submit them in a separate action, or even answer the questions asynchronously.
        """
    )
    # Hugging Face login button - allows users to authenticate with Hugging Face OAuth.
    # This is required for tracking who is submitting.
    # It provides a profile object once the user is logged in.
    gr.LoginButton()
    # Define a button to trigger the agent run.
    # When clicked, it instantiates your BasicAgent, fetches the questions, runs the agent,
    # submits the answers, and shows the results.
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    # Output display components.
    # Shows messages like "Submission Successful" or errors.
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from the DataFrame constructor.
    # Displays a log of all questions and answers in tabular form.
    # Useful for transparency and for debugging agent behaviour.
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
    # Connect the logic to the button.
    # This is where everything comes together.
    # When the button is clicked:
    # 1. run_and_submit_all(profile) runs.
    # 2. The LoginButton provides the gr.OAuthProfile implicitly.
    # 3. The return value (status + DataFrame) is sent to the Textbox and DataFrame.
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
# Entry point for the Python app.
# Controls what happens when the script is run directly (e.g. on an HF Space or locally).
# Purpose: main execution trigger.
#   * Checks the environment setup (SPACE_HOST, SPACE_ID).
#   * Provides useful diagnostics (like URLs).
#   * Finally, launches the Gradio app interface.
# Standard Python idiom to ensure the code only runs if the file is executed directly (not imported as a module).
# Since app.py is the main file, this block is the app's entry point.
if __name__ == "__main__":
    # Log app startup.
    # Pretty formatting to indicate that the app is initializing.
    # Outputs a visible header.
    print("\n" + "-" * 30 + " App Starting " + "-" * 30)  # ------------------------------ App Starting ------------------------------
    # Check for SPACE_HOST and SPACE_ID at startup, for information.
    # Check for HF environment variables.
    # NOTE: These are set automatically when the app is deployed on Hugging Face Spaces.
    space_host_startup = os.getenv("SPACE_HOST")  # subdomain for the Space (e.g., my-agent-space)
    space_id_startup = os.getenv("SPACE_ID")  # repo path (e.g., username/space-name)
    # Print SPACE_HOST info.
    # If found, log the public URL of your Space.
    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        # If not found, the app is probably running locally or in a non-Space environment.
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")
    # If SPACE_ID is found, print:
    if space_id_startup:
        # The repo homepage (good for credit/visibility).
        print(f"✅ SPACE_ID found: {space_id_startup}")
        # The repo tree (code browser).
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        # These links are often included in the final submission for review.
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
    # Final log and UI launch.
    # Finishes the startup banner and logs a message that the UI is about to appear.
    print("-" * (60 + len(" App Starting ")) + "\n")
    print("Launching Gradio Interface for Basic Agent Evaluation...")
    # LAUNCH THE APP
    # debug=True: Gradio prints extra logs (useful during development).
    # share=False: disables Gradio's external share link (you don't need it on Hugging Face Spaces).
    demo.launch(debug=True, share=False)  # Starts the Gradio interface.