"""Smolagents Beginner Lab — a small Gradio app with three agent exercises.

Requires OPENAI_API_KEY (recommended) or HF_MODEL in the environment
(e.g. Hugging Face Space secrets). Optional: OPENAI_MODEL, OPENAI_BASE_URL.
"""

import json
import os
import time
import traceback
import uuid

import gradio as gr
from smolagents import CodeAgent, tool
from smolagents.models import LiteLLMModel

# HfApiModel is not present in every smolagents release; treat it as optional.
try:
    from smolagents import HfApiModel
except Exception:
    HfApiModel = None


# --------- Model factory (latest-compatible) ----------
def get_model(prefer_openai=True):
    """Pick an LLM for the agent.

    Preference order:
      1. OpenAI via LiteLLM when OPENAI_API_KEY is set (model id taken from
         OPENAI_MODEL, defaulting to "gpt-4o-mini").
      2. Hugging Face Inference via HfApiModel (model id from HF_MODEL).

    NOTE: Temperature is set on the *model* in current smolagents — the
    CodeAgent constructor no longer accepts it.

    Args:
        prefer_openai (bool): Try OpenAI first when a key is available.

    Returns:
        A smolagents-compatible model instance.

    Raises:
        RuntimeError: If no model could be initialized.
    """
    last_error = None

    if prefer_openai and os.getenv("OPENAI_API_KEY"):
        model_id = os.getenv("OPENAI_MODEL", "gpt-4o-mini")  # safer default than gpt-5
        try:
            return LiteLLMModel(
                model_id=model_id,
                api_key=os.getenv("OPENAI_API_KEY"),
                base_url=os.getenv("OPENAI_BASE_URL"),
                temperature=0,  # determinism for orchestration
            )
        except Exception as e:
            last_error = f"OpenAI model init failed for '{model_id}': {e}"
            # One more fallback attempt with the known-good default model.
            try:
                return LiteLLMModel(
                    model_id="gpt-4o-mini",
                    api_key=os.getenv("OPENAI_API_KEY"),
                    base_url=os.getenv("OPENAI_BASE_URL"),
                    temperature=0,
                )
            except Exception as e2:
                last_error += f" | Fallback gpt-4o-mini failed: {e2}"

    if HfApiModel is not None:
        try:
            return HfApiModel(os.getenv("HF_MODEL", "Qwen/Qwen2.5-7B-Instruct"))
        except Exception as e:
            last_error = f"HF model init failed: {e}"

    raise RuntimeError(last_error or "No model available. Set OPENAI_API_KEY or HF_MODEL in Space secrets.")


# --------------- Diagnostics (OpenAI ping) ---------------
def diag_openai_ping(prompt="Say 'pong' and nothing else."):
    """Quick connectivity test against the configured OpenAI key/model.

    Newer smolagents has no ``system_prompt`` argument on the agent, so the
    instructions are folded into the goal text instead.

    Args:
        prompt (str): Message the agent should echo back.

    Returns:
        str: Human-readable success report, or the full traceback on failure.
    """
    try:
        model = get_model(prefer_openai=True)

        @tool
        def echo_tool(x: str) -> str:
            """Echo the provided text.

            Args:
                x (str): Text to echo back.

            Returns:
                str: The same text.
            """
            return x

        agent = CodeAgent(
            tools=[echo_tool],
            model=model,
            add_base_tools=False,
            # no temperature or system_prompt here (unsupported on latest agent ctor)
        )
        goal = "Reply with the user message verbatim. No extra words.\nMessage: " + prompt
        out = agent.run(goal)
        return f"✅ OpenAI ping ok.\nModel: {getattr(model, 'model_id', 'unknown')}\nOutput: {out}"
    except Exception:
        return "❌ OpenAI ping failed:\n" + traceback.format_exc()


# ===================== Exercise 1 =====================
@tool
def add(a: float, b: float) -> float:
    """Add two numbers.

    Args:
        a (float): First addend.
        b (float): Second addend.

    Returns:
        float: Sum a + b.
    """
    return a + b


@tool
def mul(a: float, b: float) -> float:
    """Multiply two numbers.

    Args:
        a (float): First factor.
        b (float): Second factor.

    Returns:
        float: Product a * b.
    """
    return a * b


def run_ex1():
    """Exercise 1: let the agent freely use add/mul to compute 2 * (3 + 4)."""
    try:
        agent = CodeAgent(
            tools=[add, mul],
            model=get_model(),
            add_base_tools=False,
        )
        goal = (
            "You are a careful math assistant. Use the available tools to compute the answer.\n"
            # The JSON example previously read {"result": } — placeholder restored.
            "Finally, PRINT ONLY a JSON object like {\"result\": <number>}.\n"
            "Task: Compute 2 * (3 + 4) and return {\"result\": 14}."
        )
        out = agent.run(goal)
        return str(out)
    except Exception:
        return "❌ Exercise 1 error:\n" + traceback.format_exc()


# ===================== Exercise 2 =====================
def run_ex2():
    """Exercise 2: same tools, but the agent must follow a fixed plan."""
    try:
        agent = CodeAgent(
            tools=[add, mul],
            model=get_model(),
            add_base_tools=False,
        )
        goal = (
            "Follow this fixed plan exactly:\n"
            "1) Call add(a=3, b=4) and store the result in x.\n"
            "2) Call mul(a=2, b=x) to compute y.\n"
            "3) PRINT ONLY this JSON: {\"result\": y}\n"
            "Never print explanations.\n"
            "Now execute the plan."
        )
        out = agent.run(goal)
        return str(out)
    except Exception:
        return "❌ Exercise 2 error:\n" + traceback.format_exc()


# ===================== Exercise 3 =====================
@tool
def validate_pr(pr: dict) -> dict:
    """Validate basic PR fields and structure.

    Args:
        pr (dict): Purchase Requisition with keys: pr_id, requester,
            cost_center, currency, items.

    Returns:
        dict: {"ok": bool, "errors": list[str]}
    """
    req = ["pr_id", "requester", "cost_center", "currency", "items"]
    errors = [f"Missing {k}" for k in req if k not in pr]
    # Single .get() lookup: a missing "items" key yields None here and is
    # reported as a validation error instead of raising KeyError (the old
    # code subscripted pr["items"] after defaulting the isinstance check).
    items = pr.get("items")
    if not isinstance(items, list) or not items:
        errors.append("Items must be a non-empty list")
    return {"ok": len(errors) == 0, "errors": errors}


@tool
def create_po(pr: dict) -> dict:
    """Create a simple Purchase Order (PO) from a PR.

    Args:
        pr (dict): Validated PR dict with items[{sku, quantity, unit_price}]
            and currency.

    Returns:
        dict: PO JSON with po_id, items, subtotal, tax, total, source_pr_id.
    """
    subtotal = 0.0
    items_out = []
    for it in pr.get("items", []):
        line = float(it["quantity"]) * float(it["unit_price"])
        items_out.append({"sku": it.get("sku", "UNKNOWN"), "line_total": round(line, 2)})
        subtotal += line
    tax = round(subtotal * 0.08, 2)  # flat 8% tax for the demo
    total = round(subtotal + tax, 2)
    return {
        # Timestamp + short uuid keeps the id unique across quick successive runs.
        "po_id": f"PO-{int(time.time())}-{uuid.uuid4().hex[:6].upper()}",
        "currency": pr.get("currency", "SGD"),
        "items": items_out,
        "subtotal": round(subtotal, 2),
        "tax": tax,
        "total": total,
        "source_pr_id": pr.get("pr_id"),
    }


# Pre-filled example PR shown in the Exercise 3 textbox.
DEFAULT_PR = json.dumps({
    "pr_id": "PR-1001",
    "requester": "A. Tan",
    "cost_center": "CC-SG-OPS",
    "currency": "SGD",
    "items": [
        {"sku": "PAPER-A4-80G", "quantity": 20, "unit_price": 6.8},
        {"sku": "STAPLER-01", "quantity": 5, "unit_price": 18.5}
    ]
}, indent=2)


def run_ex3(pr_text):
    """Exercise 3: validate a PR and turn it into a PO via the agent.

    Args:
        pr_text (str): PR as a JSON string (from the UI textbox).

    Returns:
        str: Agent output, a JSON-parse error message, or a traceback.
    """
    # Validate JSON, then inline into goal (latest run() has no additional_context kw).
    try:
        _ = json.loads(pr_text)
    except Exception as e:
        return f"Invalid JSON: {e}"
    try:
        agent = CodeAgent(
            tools=[validate_pr, create_po],
            model=get_model(),
            add_base_tools=False,
        )
        goal = (
            "You are a procurement agent.\n"
            "Use the tools as follows:\n"
            "1) validate_pr(pr). If ok==false, PRINT ONLY {\"error\": errors}.\n"
            "2) If ok==true, call create_po(pr).\n"
            "3) PRINT ONLY the resulting PO JSON. No extra text.\n"
            "The PR object you must operate on is provided below as JSON. "
            "Parse it into a Python dict variable named `pr` before calling tools.\n\n"
            "PR JSON:\n```json\n" + pr_text + "\n```\n"
        )
        out = agent.run(goal)
        return str(out)
    except Exception:
        return "❌ Exercise 3 error:\n" + traceback.format_exc()


# --------------------- Gradio UI ---------------------
with gr.Blocks(title="Smolagents Beginner Lab (Online)") as demo:
    gr.Markdown("# Smolagents Beginner Lab (Online)")
    gr.Markdown(
        "Set `OPENAI_API_KEY` in Space secrets (recommended). Optional: `OPENAI_MODEL` (e.g., gpt-4o-mini). "
        "Fallback: set `HF_MODEL` (e.g., Qwen/Qwen2.5-7B-Instruct)."
    )

    with gr.Tab("Diagnostics"):
        ping_btn = gr.Button("Run OpenAI ping test")
        ping_out = gr.Textbox(label="Diagnostics Output", lines=12)
        ping_btn.click(lambda: diag_openai_ping(), inputs=None, outputs=ping_out)

    with gr.Tab("1) Hello Tools"):
        btn1 = gr.Button("Run Exercise 1")
        out1 = gr.Textbox(label="Output", lines=6)
        btn1.click(lambda: run_ex1(), inputs=None, outputs=out1)

    with gr.Tab("2) Guardrails (deterministic)"):
        btn2 = gr.Button("Run Exercise 2")
        out2 = gr.Textbox(label="Output", lines=6)
        btn2.click(lambda: run_ex2(), inputs=None, outputs=out2)

    with gr.Tab("3) Mini PR → PO"):
        pr_input = gr.Textbox(label="PR JSON", value=DEFAULT_PR, lines=16)
        btn3 = gr.Button("Run Exercise 3")
        out3 = gr.Textbox(label="Output", lines=10)
        btn3.click(run_ex3, inputs=pr_input, outputs=out3)

demo.launch()