import gradio as gr
import json, os, time, uuid, traceback

from smolagents import CodeAgent, tool
from smolagents.models import LiteLLMModel

try:
    from smolagents import HfApiModel
except Exception:
    HfApiModel = None
# --------- Model factory (latest-compatible) ----------
def get_model(prefer_openai=True):
    """
    Pick an LLM for the agent.
    - Prefer OpenAI via LiteLLM if OPENAI_API_KEY is set.
    - Else fall back to Hugging Face Inference (HF_MODEL).
    NOTE: Temperature is set on the *model* in current smolagents.
    """
    last_error = None
    if prefer_openai and os.getenv("OPENAI_API_KEY"):
        model_id = os.getenv("OPENAI_MODEL", "gpt-4o-mini")  # safer default than gpt-5
        try:
            return LiteLLMModel(
                model_id=model_id,
                api_key=os.getenv("OPENAI_API_KEY"),
                base_url=os.getenv("OPENAI_BASE_URL"),
                temperature=0,  # determinism for orchestration
            )
        except Exception as e:
            last_error = f"OpenAI model init failed for '{model_id}': {e}"
            # one more fallback attempt
            try:
                return LiteLLMModel(
                    model_id="gpt-4o-mini",
                    api_key=os.getenv("OPENAI_API_KEY"),
                    base_url=os.getenv("OPENAI_BASE_URL"),
                    temperature=0,
                )
            except Exception as e2:
                last_error += f" | Fallback gpt-4o-mini failed: {e2}"
    if HfApiModel is not None:
        try:
            return HfApiModel(os.getenv("HF_MODEL", "Qwen/Qwen2.5-7B-Instruct"))
        except Exception as e:
            last_error = f"HF model init failed: {e}"
    raise RuntimeError(last_error or "No model available. Set OPENAI_API_KEY or HF_MODEL in Space secrets.")
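# Minimal usage sketch (assumes the relevant secrets are set on the Space):
#   OPENAI_API_KEY / OPENAI_MODEL / OPENAI_BASE_URL -> OpenAI path via LiteLLM
#   HF_MODEL (e.g. "Qwen/Qwen2.5-7B-Instruct")      -> Hugging Face fallback
#   model = get_model()                       # raises RuntimeError if nothing is configured
#   agent = CodeAgent(tools=[], model=model)  # same pattern as the exercises below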
# --------------- Diagnostics (OpenAI ping) ---------------
def diag_openai_ping(prompt="Say 'pong' and nothing else."):
    """
    Quick connectivity test for your OpenAI key/model.
    Newer smolagents: no system_prompt arg; put instructions in the goal text.
    """
    try:
        model = get_model(prefer_openai=True)

        @tool
        def echo_tool(x: str) -> str:
            """Echo the provided text.
            Args:
                x (str): Text to echo back.
            Returns:
                str: The same text.
            """
            return x

        agent = CodeAgent(
            tools=[echo_tool],
            model=model,
            add_base_tools=False,
            # no temperature or system_prompt here (unsupported on latest agent ctor)
        )
        goal = "Reply with the user message verbatim. No extra words.\nMessage: " + prompt
        out = agent.run(goal)
        return f"✅ OpenAI ping ok.\nModel: {getattr(model, 'model_id', 'unknown')}\nOutput: {out}"
    except Exception:
        return "❌ OpenAI ping failed:\n" + traceback.format_exc()
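# A successful ping returns something like this (model id and output are environment-dependent):
#   ✅ OpenAI ping ok.
#   Model: gpt-4o-mini
#   Output: Say 'pong' and nothing else.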
# ===================== Exercise 1 =====================
@tool
def add(a: float, b: float) -> float:
    """Add two numbers.
    Args:
        a (float): First addend.
        b (float): Second addend.
    Returns:
        float: Sum a + b.
    """
    return a + b


@tool
def mul(a: float, b: float) -> float:
    """Multiply two numbers.
    Args:
        a (float): First factor.
        b (float): Second factor.
    Returns:
        float: Product a * b.
    """
    return a * b
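# Quick sanity check (assuming the @tool wrapper keeps the functions callable directly):
#   add(a=3.0, b=4.0) -> 7.0
#   mul(a=2.0, b=7.0) -> 14.0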
def run_ex1():
    try:
        agent = CodeAgent(
            tools=[add, mul],
            model=get_model(),
            add_base_tools=False,
        )
        goal = (
            "You are a careful math assistant. Use the available tools to compute the answer.\n"
            "Finally, PRINT ONLY a JSON object like {\"result\": <number>}.\n"
            "Task: Compute 2 * (3 + 4) and return {\"result\": 14}."
        )
        out = agent.run(goal)
        return str(out)
    except Exception:
        return "❌ Exercise 1 error:\n" + traceback.format_exc()
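# With a cooperative model, the agent's final answer should be {"result": 14};
# the exact wrapping of the output is model-dependent.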
# ===================== Exercise 2 =====================
def run_ex2():
    try:
        agent = CodeAgent(
            tools=[add, mul],
            model=get_model(),
            add_base_tools=False,
        )
        goal = (
            "Follow this fixed plan exactly:\n"
            "1) Call add(a=3, b=4) and store the result in x.\n"
            "2) Call mul(a=2, b=x) to compute y.\n"
            "3) PRINT ONLY this JSON: {\"result\": y}\n"
            "Never print explanations.\n"
            "Now execute the plan."
        )
        out = agent.run(goal)
        return str(out)
    except Exception:
        return "❌ Exercise 2 error:\n" + traceback.format_exc()
# ===================== Exercise 3 =====================
@tool
def validate_pr(pr: dict) -> dict:
    """Validate basic PR fields and structure.
    Args:
        pr (dict): Purchase Requisition with keys: pr_id, requester, cost_center, currency, items.
    Returns:
        dict: {"ok": bool, "errors": list[str]}
    """
    req = ["pr_id", "requester", "cost_center", "currency", "items"]
    errors = [f"Missing {k}" for k in req if k not in pr]
    items = pr.get("items")
    if not isinstance(items, list) or not items:
        errors.append("Items must be a non-empty list")
    return {"ok": len(errors) == 0, "errors": errors}
@tool
def create_po(pr: dict) -> dict:
    """Create a simple Purchase Order (PO) from a PR.
    Args:
        pr (dict): Validated PR dict with items[{sku, quantity, unit_price}] and currency.
    Returns:
        dict: PO JSON with po_id, items, subtotal, tax, total, source_pr_id.
    """
    subtotal = 0.0
    items_out = []
    for it in pr.get("items", []):
        line = float(it["quantity"]) * float(it["unit_price"])
        items_out.append({"sku": it.get("sku", "UNKNOWN"), "line_total": round(line, 2)})
        subtotal += line
    tax = round(subtotal * 0.08, 2)
    total = round(subtotal + tax, 2)
    return {
        "po_id": f"PO-{int(time.time())}-{uuid.uuid4().hex[:6].upper()}",
        "currency": pr.get("currency", "SGD"),
        "items": items_out,
        "subtotal": round(subtotal, 2),
        "tax": tax,
        "total": total,
        "source_pr_id": pr.get("pr_id"),
    }
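# Worked example using the DEFAULT_PR below (8% tax, as coded above):
#   line totals: 20 * 6.8 = 136.0 and 5 * 18.5 = 92.5 -> subtotal 228.5
#   tax = round(228.5 * 0.08, 2) = 18.28, total = 246.78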
DEFAULT_PR = json.dumps({
    "pr_id": "PR-1001",
    "requester": "A. Tan",
    "cost_center": "CC-SG-OPS",
    "currency": "SGD",
    "items": [
        {"sku": "PAPER-A4-80G", "quantity": 20, "unit_price": 6.8},
        {"sku": "STAPLER-01", "quantity": 5, "unit_price": 18.5}
    ]
}, indent=2)
def run_ex3(pr_text):
    # Validate JSON, then inline into goal (latest run() has no additional_context kw)
    try:
        _ = json.loads(pr_text)
    except Exception as e:
        return f"Invalid JSON: {e}"
    try:
        agent = CodeAgent(
            tools=[validate_pr, create_po],
            model=get_model(),
            add_base_tools=False,
        )
        goal = (
            "You are a procurement agent.\n"
            "Use the tools as follows:\n"
            "1) validate_pr(pr). If ok==false, PRINT ONLY {\"error\": errors}.\n"
            "2) If ok==true, call create_po(pr).\n"
            "3) PRINT ONLY the resulting PO JSON. No extra text.\n"
            "The PR object you must operate on is provided below as JSON. "
            "Parse it into a Python dict variable named `pr` before calling tools.\n\n"
            "PR JSON:\n```json\n" + pr_text + "\n```\n"
        )
        out = agent.run(goal)
        return str(out)
    except Exception:
        return "❌ Exercise 3 error:\n" + traceback.format_exc()
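# The exercises can also be driven outside the UI, e.g. run_ex3(DEFAULT_PR) from a Python
# shell; with a cooperative model the final answer is the PO JSON produced by create_po.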
# --------------------- Gradio UI ---------------------
with gr.Blocks(title="Smolagents Beginner Lab (Online)") as demo:
    gr.Markdown("# Smolagents Beginner Lab (Online)")
    gr.Markdown("Set `OPENAI_API_KEY` in Space secrets (recommended). Optional: `OPENAI_MODEL` (e.g., gpt-4o-mini). "
                "Fallback: set `HF_MODEL` (e.g., Qwen/Qwen2.5-7B-Instruct).")

    with gr.Tab("Diagnostics"):
        ping_btn = gr.Button("Run OpenAI ping test")
        ping_out = gr.Textbox(label="Diagnostics Output", lines=12)
        ping_btn.click(lambda: diag_openai_ping(), inputs=None, outputs=ping_out)

    with gr.Tab("1) Hello Tools"):
        btn1 = gr.Button("Run Exercise 1")
        out1 = gr.Textbox(label="Output", lines=6)
        btn1.click(lambda: run_ex1(), inputs=None, outputs=out1)

    with gr.Tab("2) Guardrails (deterministic)"):
        btn2 = gr.Button("Run Exercise 2")
        out2 = gr.Textbox(label="Output", lines=6)
        btn2.click(lambda: run_ex2(), inputs=None, outputs=out2)

    with gr.Tab("3) Mini PR → PO"):
        pr_input = gr.Textbox(label="PR JSON", value=DEFAULT_PR, lines=16)
        btn3 = gr.Button("Run Exercise 3")
        out3 = gr.Textbox(label="Output", lines=10)
        btn3.click(run_ex3, inputs=pr_input, outputs=out3)

demo.launch()