import os
import threading

import gradio as gr

from crew import run_crew
from utils import get_questions

# Guards the temporary export of API keys into os.environ, which is
# process-wide shared state.
_env_lock = threading.Lock()


def ask(question, openai_api_key, gemini_api_key, anthropic_api_key, file_name=""):
    """
    Ask the General AI Assistant a question.

    Args:
        question (str): The question to answer
        openai_api_key (str): OpenAI API key
        gemini_api_key (str): Gemini API key
        anthropic_api_key (str): Anthropic API key
        file_name (str): Optional name of a file in the data/ directory that the question refers to

    Returns:
        str: The answer to the question
    """
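    # Fail fast with a visible error in the UI if a required field is missing.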
    if not question:
        raise gr.Error("Question is required.")

    if not openai_api_key:
        raise gr.Error("OpenAI API Key is required.")

    if not gemini_api_key:
        raise gr.Error("Gemini API Key is required.")

    if not anthropic_api_key:
        raise gr.Error("Anthropic API Key is required.")

    if file_name:
        file_name = f"data/{file_name}"
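
    # Crew runs are serialized: credentials travel through process-wide
    # environment variables, so only one request may export its keys at a time.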
    with _env_lock:
        answer = ""

        try:
            # Export the user-supplied keys only for the duration of this run;
            # run_crew reads its credentials from environment variables.
            os.environ["OPENAI_API_KEY"] = openai_api_key
            os.environ["GEMINI_API_KEY"] = gemini_api_key
            os.environ["MODEL_API_KEY"] = anthropic_api_key

            answer = run_crew(question, file_name)
        except Exception as e:
            raise gr.Error(str(e))
        finally:
            # Always remove the keys again, even if run_crew raised.
            os.environ.pop("OPENAI_API_KEY", None)
            os.environ.pop("GEMINI_API_KEY", None)
            os.environ.pop("MODEL_API_KEY", None)

    return answer
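

# Shut down any Gradio apps already running in this process before relaunching.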
gr.close_all()
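
# UI: a "Solution" tab with the question form and GAIA example problems, plus a
# "Documentation" tab rendered from the DOCUMENTATION environment variable.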
with gr.Blocks() as grady:
    gr.Markdown("## Grady - General AI Assistant")

    with gr.Tab("Solution"):
        gr.Markdown(os.environ.get("DESCRIPTION"))

        with gr.Row():
            with gr.Column(scale=3):
                with gr.Row():
                    question = gr.Textbox(
                        label="Question *",
                        placeholder="In the 2025 Gradio Agents & MCP Hackathon, what percentage of participants submitted a solution during the last 24 hours?",
                        interactive=True
                    )
                with gr.Row():
                    level = gr.Radio(
                        choices=[1, 2, 3],
                        label="GAIA Benchmark Level",
                        interactive=True,
                        scale=1
                    )
                    ground_truth = gr.Textbox(
                        label="Ground Truth",
                        interactive=True,
                        scale=1
                    )
                    file_name = gr.Textbox(
                        label="File Name",
                        interactive=True,
                        scale=2
                    )
                with gr.Row():
                    openai_api_key = gr.Textbox(
                        label="OpenAI API Key *",
                        type="password",
                        placeholder="sk-...",
                        interactive=True
                    )
                    gemini_api_key = gr.Textbox(
                        label="Gemini API Key *",
                        type="password",
                        interactive=True
                    )
                    anthropic_api_key = gr.Textbox(
                        label="Anthropic API Key *",
                        type="password",
                        placeholder="sk-ant-...",
                        interactive=True
                    )
                with gr.Row():
                    clear_btn = gr.ClearButton(
                        components=[question, level, ground_truth, file_name]
                    )
                    submit_btn = gr.Button("Submit", variant="primary")
            with gr.Column(scale=1):
                answer = gr.Textbox(
                    label="Answer",
                    lines=1,
                    interactive=False
                )
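
        # Run ask() when Submit is clicked and show the result in the Answer box.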
        submit_btn.click(
            fn=ask,
            inputs=[question, openai_api_key, gemini_api_key, anthropic_api_key, file_name],
            outputs=answer
        )
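
        # GAIA validation split used to populate the example problems below.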
        QUESTION_FILE_PATH = "data/gaia_validation.jsonl"
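
        # One example table per GAIA difficulty level. Selecting a row only fills
        # the inputs above; nothing runs until Submit is clicked.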
        gr.Examples(
            label="GAIA Benchmark Level 1 Problems",
            examples=get_questions(QUESTION_FILE_PATH, 1),
            inputs=[question, level, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
            outputs=answer,
            cache_examples=False
        )

        gr.Examples(
            label="GAIA Benchmark Level 2 Problems",
            examples=get_questions(QUESTION_FILE_PATH, 2),
            inputs=[question, level, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
            outputs=answer,
            cache_examples=False
        )

        gr.Examples(
            label="GAIA Benchmark Level 3 Problems",
            examples=get_questions(QUESTION_FILE_PATH, 3),
            inputs=[question, level, ground_truth, file_name, openai_api_key, gemini_api_key, anthropic_api_key],
            outputs=answer,
            cache_examples=False
        )

    with gr.Tab("Documentation"):
        gr.Markdown(os.environ.get("DOCUMENTATION"))
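
# mcp_server=True also exposes the app's API endpoints as MCP tools alongside the web UI.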
grady.launch(mcp_server=True)