import gradio as gr
import json
import plotly.graph_objects as go
from transformers import pipeline
from PIL import Image, ImageDraw
# ----------------------------
# Load instruction-tuned text model (fast on CPU)
# ----------------------------
chat_model = pipeline("text2text-generation", model="google/flan-t5-small", device=-1)
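# Optional swap (untested here): "google/flan-t5-base" is the next size up on
# the Hugging Face Hub and noticeably more capable, at the cost of slower CPU
# inference:
# chat_model = pipeline("text2text-generation", model="google/flan-t5-base", device=-1)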
def query_llm(prompt, history, persona):
    # History is ignored here because flan-t5 is single-turn
    if persona != "Default":
        prompt = f"As a {persona}, {prompt}"
    out = chat_model(prompt, max_new_tokens=150)
    return out[0]["generated_text"].strip()
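# Illustrative call: query_llm("Explain entropy", [], "Philosopher") sends
# "As a Philosopher, Explain entropy" to the model and returns the stripped
# generation.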
def make_placeholder_image(prompt: str):
    # Dark 512x512 canvas with the prompt stamped on it, standing in for a real image model
    img = Image.new("RGB", (512, 512), color=(30, 30, 60))
    d = ImageDraw.Draw(img)
    d.text((20, 20), f"[Sketch of: {prompt}]", fill=(200, 200, 255))
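    # Caveat: ImageDraw.text uses PIL's default bitmap font, so a long prompt
    # runs off the 512-px canvas. One optional mitigation (a sketch, not part
    # of the demo) is to wrap the text first:
    #   import textwrap
    #   d.text((20, 20), "\n".join(textwrap.wrap(f"[Sketch of: {prompt}]", 40)), fill=(200, 200, 255))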
    return img
def multimodal_chat(user_msg, history, persona):
    history = history or []
    # Very simple routing: look for keywords before falling back to the LLM
    img, fig = None, None
    if "chart" in user_msg.lower():
        fig = go.Figure()
        fig.add_trace(go.Scatter(x=[2010, 2020, 2030], y=[5, 50, 200],
                                 mode="lines+markers", name="AI Adoption"))
        fig.update_layout(title="AI Adoption Over Time")
        history.append([user_msg, "📊 Here's a chart of AI adoption"])
    elif "simulate" in user_msg.lower():
        steps = ["Aliens send a signal", "Humans decode it", "First meeting arranged"]
        history.append([user_msg, "🔮 Simulation: First Contact\n" + "\n".join(f"→ {s}" for s in steps)])
    elif "draw" in user_msg.lower() or "image" in user_msg.lower():
        img = make_placeholder_image(user_msg)
        history.append([user_msg, f"🖼️ (Placeholder image for: {user_msg})"])
    else:
        # Only run the model when no keyword route matched, so tool turns
        # don't pay for an unused inference
        history.append([user_msg, query_llm(user_msg, history, persona)])
    return history, img, fig
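# To add another tool, extend the keyword routing above with an elif branch
# that fills `img` or `fig` and appends a [user, assistant] pair to `history`;
# the UI below renders whatever comes back.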
# ----------------------------
# Gradio UI
# ----------------------------
with gr.Blocks(css="style.css") as demo:
    gr.Markdown("🧠 **ZEN Research Lab (CPU-Safe Edition)**", elem_id="zen-header")
    gr.Markdown("✅ Text ✅ Charts ✅ Simulation ✅ Placeholder Images (no GPU needed)")
    persona = gr.Dropdown(["Default", "Analyst", "Artist", "Futurist", "Philosopher"],
                          label="Mode", value="Default")
    chatbot = gr.Chatbot(label="Conversation", height=400)
    with gr.Row():
        user_msg = gr.Textbox(placeholder="Ask me anything…", label="Your message", scale=4)
        send_btn = gr.Button("Send", variant="primary")
    img_out = gr.Image(label="Generated image")
    chart_out = gr.Plot(label="Interactive chart")
    def respond(user_msg, chat_history, persona):
        chat_history, img, fig = multimodal_chat(user_msg, chat_history, persona)
        # img/fig come back as None when a branch did not produce them,
        # which simply clears the corresponding component
        return chat_history, gr.update(value=img), gr.update(value=fig)

    send_btn.click(respond, inputs=[user_msg, chatbot, persona],
                   outputs=[chatbot, img_out, chart_out])
    user_msg.submit(respond, inputs=[user_msg, chatbot, persona],
                    outputs=[chatbot, img_out, chart_out])
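    # The Send button and pressing Enter in the textbox share the same
    # handler and output components.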
    # Examples
    with gr.Accordion("✨ Try these examples"):
        gr.Examples(
            examples=[
                ["Draw a futuristic city skyline at night"],
                ["Simulate first contact with an alien civilization"],
                ["Make a chart of AI adoption from 2010 to 2030"],
                ["Explain quantum entanglement in simple terms"],
            ],
            inputs=[user_msg],
        )
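# queue(max_size=50) bounds how many requests may wait at once, which keeps a
# shared CPU host responsive. Passing share=True to launch() would additionally
# expose a temporary public URL (optional; not enabled in this demo).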
if __name__ == "__main__":
    demo.queue(max_size=50).launch()