"""
ZEN-Bot Ultimate — key-free Hugging Face Space (free CPU)
Skills
• Normal chat
• /math <expr> — safe calculator
• /summarize <text> — 2-sentence TL;DR
• /translate_es <text> — English → Spanish
• /ascii <text> — FIGlet ASCII art
• Any question ending with “?” → live Wikipedia answer
↳ remembers last topic, so “Who discovered it?” works.
Model: facebook/blenderbot-400M-distill (public, ~720 MB)
"""
import ast, math, re, gc, traceback, torch, wikipedia, pyfiglet
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# ───────────────────────── helpers ──────────────────────────
def log_err(e: Exception) -> str:
    """Dump the active traceback to stdout and return a one-line user-facing tag.

    Every skill's ``except`` handler funnels through here so failures show up
    in the Space logs while the chat still receives a readable message.
    """
    header, footer = "=== ZEN-Bot ERROR ===", "=====================\n"
    print(header)
    traceback.print_exc()
    print(footer)
    return f"⚠️ {type(e).__name__}: {e}"
# ─────────────────────── model loading ──────────────────────
# Small public seq2seq chat model — downloads without an auth token.
MODEL_ID = "facebook/blenderbot-400M-distill" # always public
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)
# Inference only: eval mode plus globally disabled autograd (saves memory/CPU).
model.eval(); torch.set_grad_enabled(False)
# Shared generation kwargs: greedy decoding with a capped reply length.
GEN_KW = dict(max_new_tokens=64, do_sample=False) # deterministic → faster
def llm(prompt: str) -> str:
    """Generate a reply for *prompt* with the BlenderBot seq2seq model.

    Returns the decoded reply string, or a formatted error message
    (via ``log_err``) if tokenization or generation raises.
    """
    try:
        ids_in = tokenizer(prompt, return_tensors="pt",
                           truncation=True, max_length=1024).input_ids
        with torch.no_grad():
            ids_out = model.generate(ids_in, **GEN_KW)
        # BUG FIX: BlenderBot is an encoder-decoder model, so `generate`
        # returns ONLY decoder (reply) tokens.  The previous slice
        # `ids_out[0, ids_in.shape[-1]:]` is a decoder-only-model idiom and
        # chopped off most or all of the reply.  Decode the full output row.
        return tokenizer.decode(ids_out[0], skip_special_tokens=True).strip()
    except Exception as e:
        return log_err(e)
# ──────────────────────── /math safe-eval ───────────────────
_MATH = {k: getattr(math, k) for k in dir(math) if not k.startswith("_")}
_MATH.update({"abs": abs, "round": round})
def safe_math(expr: str) -> str:
try:
node = ast.parse(expr, mode="eval")
def ok(n):
match n:
case ast.Num(): return True
case ast.UnaryOp(): return ok(n.operand)
case ast.BinOp(): return ok(n.left) and ok(n.right)
case ast.Call():
return (isinstance(n.func, ast.Name)
and n.func.id in _MATH
and all(ok(a) for a in n.args))
case _: return False
if not ok(node.body):
return "⛔️ Only basic math / trig / log functions allowed."
return str(eval(compile(node, "<expr>", "eval"),
{"__builtins__": {}}, _MATH))
except Exception as e:
return log_err(e)
# ──────────────────────── Wikipedia Q&A ─────────────────────
last_topic: str | None = None # shared across the single free CPU Space
def wiki_answer(q: str) -> str | None:
    """Look up *q* on English Wikipedia and return a short markdown answer.

    Resolves the pronoun "it" against the previously answered topic, stores
    the resolved page title for the next turn, and returns ``None`` when the
    query is ambiguous or has no matching page.
    """
    global last_topic
    try:
        wikipedia.set_lang("en")
        question = q.strip()
        if last_topic and re.search(r"\bit\b", question, re.I):
            # Follow-up turn: swap "it" for the remembered topic.
            question = re.sub(r"\bit\b", last_topic, question, flags=re.I)
        page = wikipedia.page(question, auto_suggest=True, redirect=True)
        last_topic = page.title  # remember for the next turn
        tldr = wikipedia.summary(page.title, sentences=3, auto_suggest=False)
        return f"**{page.title}** — {tldr}"
    except (wikipedia.DisambiguationError, wikipedia.PageError):
        return None
    except Exception as e:
        return log_err(e)
# ───────────────────────── ASCII art ─────────────────────────
def ascii_art(text: str) -> str:
    """Render *text* as FIGlet ASCII art on a 120-column canvas."""
    try:
        rendered = pyfiglet.figlet_format(text, width=120)
    except Exception as e:
        return log_err(e)
    return rendered
# ────────────────── main router / callback ───────────────────
# Matches "/<command> <body>"; DOTALL so the body may span multiple lines.
CMD = re.compile(r"^/(math|summarize|translate_es|ascii)\s+(.+)", re.S | re.I)
def respond(msg: str, history: list[list[str]]) -> str:
    """Route one chat turn to the right skill and return the reply text.

    Priority: (A) slash-commands, (B) live Wikipedia for factual questions
    ending in "?", (C) plain LLM chat seeded with the last 6 history turns.

    ``history`` is Gradio's list of ``[user, bot]`` message pairs.
    (FIX: the original annotation ``list[list[str, str]]`` was malformed —
    ``list[...]`` takes exactly one type argument.)
    """
    # A · commands
    if (m := CMD.match(msg.strip())):
        cmd, body = m.group(1).lower(), m.group(2).strip()
        if cmd == "math": return safe_math(body)
        if cmd == "ascii": return ascii_art(body)
        if cmd == "summarize": return llm(f"Summarize in two concise sentences:\n\n{body}\n\nSummary:")
        if cmd == "translate_es": return llm(f"Translate into Spanish (natural):\n\n{body}\n\nSpanish:")
    # B · live Wikipedia for factual Qs (skip very short questions)
    if msg.endswith("?") and len(msg.split()) > 2:
        wiki = wiki_answer(msg)
        if wiki: return wiki
    # C · normal chat (keep last 6 turns for speed)
    prompt = "You are ZEN-Bot, a concise, friendly tutor for young AI pioneers.\n\n"
    for u, b in history[-6:]:
        prompt += f"User: {u}\nAssistant: {b}\n"
    prompt += f"User: {msg}\nAssistant:"
    return llm(prompt)
# ────────────────────── Gradio Chat UI ───────────────────────
# Gradio chat UI; `respond` handles every turn.
demo = gr.ChatInterface(
    fn = respond,
    title = "🚀 ZEN-Bot Ultimate (Key-Free)",
    description = (
        "**Commands** \n"
        "• normal chat \n"
        "• `/math 2**5 / (sin(0.5)+1)` \n"
        "• `/summarize <text>` \n"
        "• `/translate_es Hello!` \n"
        "• `/ascii ZEN` \n"
        "• Ask factual questions ending with `?` (remembers topic for 'it')"
    ),
    # Clickable example prompts shown under the chat box.
    examples = [
        "Who discovered penicillin?",
        "/ascii AI ROCKS",
        "/math sqrt(144)+log(100,10)",
        "/summarize The Industrial Revolution began in Britain...",
        "/translate_es Good evening, friends!",
        "Who discovered it?",
    ],
    cache_examples = False, # ← avoids the startup pre-run
    theme = "soft",
    fill_height = True,
)
if __name__ == "__main__":
    # launch() blocks until the server shuts down.
    demo.launch()
    # NOTE(review): runs only after launch() returns, so this is effectively
    # shutdown-time cleanup — confirm it is intentional.
    gc.collect()