""" | |
ZEN-Bot Ultimate — key-free Hugging Face Space (free CPU) | |
Skills | |
• Normal chat | |
• /math <expr> — safe calculator | |
• /summarize <text> — 2-sentence TL;DR | |
• /translate_es <text> — English → Spanish | |
• /ascii <text> — FIGlet ASCII art | |
• Any question ending with “?” → live Wikipedia answer | |
↳ remembers last topic, so “Who discovered it?” works. | |
Model: facebook/blenderbot-400M-distill (public, ~720 MB) | |
""" | |
import ast, math, re, gc, traceback, torch, wikipedia, pyfiglet
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# ───────────────────────── helpers ──────────────────────────
def log_err(e: Exception) -> str:
    print("=== ZEN-Bot ERROR ===")
    traceback.print_exc()
    print("=====================\n")
    return f"⚠️ {type(e).__name__}: {e}"
# ─────────────────────── model loading ──────────────────────
MODEL_ID = "facebook/blenderbot-400M-distill"   # always public
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)
model.eval(); torch.set_grad_enabled(False)
GEN_KW = dict(max_new_tokens=64, do_sample=False)   # greedy decoding → deterministic, faster
def llm(prompt: str) -> str:
    try:
        ids_in = tokenizer(prompt, return_tensors="pt", truncation=True,
                           max_length=128).input_ids   # BlenderBot's context window is 128 tokens
        with torch.no_grad():
            ids_out = model.generate(ids_in, **GEN_KW)
        # Encoder-decoder models return only the reply tokens, so decode the
        # whole output; slicing off the prompt length (a decoder-only idiom)
        # would chop off the answer instead of the prompt.
        return tokenizer.decode(ids_out[0], skip_special_tokens=True).strip()
    except Exception as e:
        return log_err(e)
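# Quick sanity check (illustrative only — exact wording depends on the model):
#   llm("Hello! How are you?")  ->  e.g. " I'm doing well, how are you doing?"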
# ──────────────────────── /math safe-eval ───────────────────
_MATH = {k: getattr(math, k) for k in dir(math) if not k.startswith("_")}
_MATH.update({"abs": abs, "round": round})
def safe_math(expr: str) -> str:
    try:
        node = ast.parse(expr, mode="eval")

        def ok(n):
            match n:
                # ast.Num is deprecated; numeric literals parse as ast.Constant.
                case ast.Constant() if isinstance(n.value, (int, float)):
                    return True
                case ast.UnaryOp():
                    return ok(n.operand)
                case ast.BinOp():
                    return ok(n.left) and ok(n.right)
                case ast.Call():
                    return (isinstance(n.func, ast.Name)
                            and n.func.id in _MATH
                            and all(ok(a) for a in n.args))
                case _:
                    return False

        if not ok(node.body):
            return "⛔️ Only basic math / trig / log functions allowed."
        return str(eval(compile(node, "<expr>", "eval"),
                        {"__builtins__": {}}, _MATH))
    except Exception as e:
        return log_err(e)
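# Examples of what the whitelist does and does not allow:
#   safe_math("sqrt(144) + 2")        -> "14.0"
#   safe_math("2**5 / (sin(0.5)+1)")  -> "21.63..." (approximately)
#   safe_math("__import__('os')")     -> "⛔️ Only basic math / trig / log functions allowed."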
# ──────────────────────── Wikipedia Q&A ─────────────────────
last_topic: str | None = None   # shared across the single free CPU Space

def wiki_answer(q: str) -> str | None:
    """3-sentence Wikipedia answer — returns None if not found."""
    global last_topic
    try:
        query = q.strip()
        if last_topic and re.search(r"\bit\b", query, re.I):
            query = re.sub(r"\bit\b", last_topic, query, flags=re.I)
        wikipedia.set_lang("en")
        page = wikipedia.page(query, auto_suggest=True, redirect=True)
        last_topic = page.title   # remember for the next turn
        summary = wikipedia.summary(page.title, sentences=3, auto_suggest=False)
        return f"**{page.title}** — {summary}"
    except (wikipedia.DisambiguationError, wikipedia.PageError):
        return None
    except Exception as e:
        return log_err(e)
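# Illustrative flow (live network call; auto_suggest may or may not resolve a
# full question to the right page):
#   wiki_answer("penicillin")  ->  "**Penicillin** — <3-sentence summary>"
#   last_topic is then "Penicillin", so a follow-up "Who discovered it?" is
#   rewritten to "Who discovered Penicillin?" before the lookup.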
# ───────────────────────── ASCII art ────────────────────────
def ascii_art(text: str) -> str:
    try:
        return pyfiglet.figlet_format(text, width=120)
    except Exception as e:
        return log_err(e)
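# e.g. ascii_art("ZEN") returns a multi-line FIGlet banner in pyfiglet's
# default "standard" font; it keeps its alignment best in a monospace block.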
# ────────────────── main router / callback ──────────────────
CMD = re.compile(r"^/(math|summarize|translate_es|ascii)\s+(.+)", re.S | re.I)
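# The regex captures the command name and everything after the first space,
# e.g. "/math 2+2" -> ("math", "2+2"); re.S lets <text> span multiple lines,
# and anything that doesn't match falls through to Wikipedia / free chat.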
def respond(msg: str, history: list[list[str]]) -> str:
    # A · commands
    if (m := CMD.match(msg.strip())):
        cmd, body = m.group(1).lower(), m.group(2).strip()
        if cmd == "math":
            return safe_math(body)
        if cmd == "ascii":
            return ascii_art(body)
        if cmd == "summarize":
            return llm(f"Summarize in two concise sentences:\n\n{body}\n\nSummary:")
        if cmd == "translate_es":
            return llm(f"Translate into Spanish (natural):\n\n{body}\n\nSpanish:")

    # B · live Wikipedia for factual questions
    if msg.rstrip().endswith("?") and len(msg.split()) > 2:
        wiki = wiki_answer(msg)
        if wiki:
            return wiki

    # C · normal chat (keep the last 6 turns for speed)
    prompt = "You are ZEN-Bot, a concise, friendly tutor for young AI pioneers.\n\n"
    for u, b in history[-6:]:
        prompt += f"User: {u}\nAssistant: {b}\n"
    prompt += f"User: {msg}\nAssistant:"
    return llm(prompt)
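# Routing at a glance (assumed inputs, for illustration):
#   "/ascii ZEN"           -> A: command table
#   "Who invented radar?"  -> B: Wikipedia (ends with "?", more than 2 words)
#   "tell me a joke"       -> C: BlenderBot chat with the last 6 turns as context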
# ────────────────────── Gradio Chat UI ──────────────────────
demo = gr.ChatInterface(
    fn=respond,
    title="🚀 ZEN-Bot Ultimate (Key-Free)",
    description=(
        "**Commands**  \n"
        "• normal chat  \n"
        "• `/math 2**5 / (sin(0.5)+1)`  \n"
        "• `/summarize <text>`  \n"
        "• `/translate_es Hello!`  \n"
        "• `/ascii ZEN`  \n"
        "• Ask factual questions ending with `?` (remembers the topic for 'it')"
    ),
    examples=[
        "Who discovered penicillin?",
        "/ascii AI ROCKS",
        "/math sqrt(144)+log(100,10)",
        "/summarize The Industrial Revolution began in Britain...",
        "/translate_es Good evening, friends!",
        "Who discovered it?",
    ],
    cache_examples=False,   # avoids the slow example pre-run at startup
    theme="soft",
    fill_height=True,
)
if __name__ == "__main__":
    gc.collect()    # free load-time garbage before the server starts; launch() blocks
    demo.launch()