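"""ZEN Research Lab demo (CPU-safe edition).

A Gradio app that routes chat messages by keyword: "chart" renders a Plotly
figure, "simulate" returns a canned scenario, "draw"/"image" returns a
placeholder PIL image, and everything else goes to a small flan-t5 model.
"""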
import gradio as gr
import plotly.graph_objects as go
from transformers import pipeline
from PIL import Image, ImageDraw
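
# Assumed dependencies (a sketch of a matching requirements.txt; exact pins
# are not in this file, so adjust as needed):
#   gradio
#   transformers
#   torch    # backend the transformers pipeline loads flan-t5 with
#   plotly
#   pillow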
# ----------------------------
# Load instruction-tuned text model (fast on CPU)
# ----------------------------
chat_model = pipeline("text2text-generation", model="google/flan-t5-small", device=-1)  # device=-1 pins the pipeline to CPU
def query_llm(prompt, history, persona):
    # history is accepted but unused: flan-t5 is a single-turn model.
    if persona != "Default":
        prompt = f"As a {persona}, {prompt}"
    out = chat_model(prompt, max_new_tokens=150)
    return out[0]["generated_text"].strip()
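
# If multi-turn context is wanted later, one untested sketch is to fold the
# most recent turns into the prompt (mind flan-t5's ~512-token input limit;
# query_llm_with_history is a hypothetical helper, not used below):
#
# def query_llm_with_history(prompt, history, persona):
#     context = "\n".join(f"User: {u}\nAssistant: {a}" for u, a in history[-3:])
#     full = f"{context}\nUser: {prompt}\nAssistant:" if context else prompt
#     return query_llm(full, [], persona)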
def make_placeholder_image(prompt: str):
    # Flat dark canvas with the prompt rendered in PIL's default bitmap font.
    img = Image.new("RGB", (512, 512), color=(30, 30, 60))
    d = ImageDraw.Draw(img)
    d.text((20, 20), f"[Sketch of: {prompt}]", fill=(200, 200, 255))
    return img
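
# Long prompts run off the 512 px canvas; a minimal wrapping variant using the
# stdlib textwrap module (an untested sketch, not wired into the app):
#
# import textwrap
# def make_placeholder_image_wrapped(prompt: str):
#     img = Image.new("RGB", (512, 512), color=(30, 30, 60))
#     d = ImageDraw.Draw(img)
#     d.multiline_text((20, 20), textwrap.fill(f"[Sketch of: {prompt}]", 40),
#                      fill=(200, 200, 255))
#     return img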
def multimodal_chat(user_msg, history, persona):
    history = history or []
    img, fig = None, None
    # Very simple keyword routing: chart / simulate / draw-or-image,
    # falling through to the LLM for everything else.
    lowered = user_msg.lower()
    if "chart" in lowered:
        fig = go.Figure()
        fig.add_trace(go.Scatter(x=[2010, 2020, 2030], y=[5, 50, 200],
                                 mode="lines+markers", name="AI Adoption"))
        fig.update_layout(title="AI Adoption Over Time")
        history.append([user_msg, "📊 Here's a chart of AI adoption"])
    elif "simulate" in lowered:
        steps = ["Aliens send a signal", "Humans decode it", "First meeting arranged"]
        history.append([user_msg, "🔮 Simulation: First Contact\n" + "\n".join(f"→ {s}" for s in steps)])
    elif "draw" in lowered or "image" in lowered:
        img = make_placeholder_image(user_msg)
        history.append([user_msg, f"🖼️ (Placeholder image for: {user_msg})"])
    else:
        # Only pay for a generation when no keyword route matched.
        history.append([user_msg, query_llm(user_msg, history, persona)])
    return history, img, fig
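
# Return shape by route (illustrative):
#   "Make a chart ..."  -> (history, None, plotly Figure)
#   "Draw a city ..."   -> (history, PIL Image, None)
#   "Simulate ..."      -> (history, None, None) with a canned scenario
#   anything else       -> (history, None, None) with an LLM reply appended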
# ----------------------------
# Gradio UI
# ----------------------------
with gr.Blocks(css="style.css") as demo:
    gr.Markdown("🧠 **ZEN Research Lab (CPU-Safe Edition)**", elem_id="zen-header")
    gr.Markdown("✅ Text ✅ Charts ✅ Simulation ✅ Placeholder Images (no GPU needed)")
    persona = gr.Dropdown(["Default", "Analyst", "Artist", "Futurist", "Philosopher"], label="Mode", value="Default")
    chatbot = gr.Chatbot(label="Conversation", height=400)
    with gr.Row():
        user_msg = gr.Textbox(placeholder="Ask me anything…", label="Your message", scale=4)
        send_btn = gr.Button("Send", variant="primary")
    img_out = gr.Image(label="Generated image")
    chart_out = gr.Plot(label="Interactive chart")
    def respond(user_msg, chat_history, persona):
        chat_history, img, fig = multimodal_chat(user_msg, chat_history, persona)
        # gr.update(value=None) clears a pane, so returning the (possibly None)
        # img/fig directly covers both cases; "" clears the textbox after send.
        return chat_history, gr.update(value=img), gr.update(value=fig), ""

    send_btn.click(respond, inputs=[user_msg, chatbot, persona],
                   outputs=[chatbot, img_out, chart_out, user_msg])
    user_msg.submit(respond, inputs=[user_msg, chatbot, persona],
                    outputs=[chatbot, img_out, chart_out, user_msg])
    # Examples
    with gr.Accordion("✨ Try these examples"):
        gr.Examples(
            examples=[
                ["Draw a futuristic city skyline at night"],
                ["Simulate first contact with an alien civilization"],
                ["Make a chart of AI adoption from 2010 to 2030"],
                ["Explain quantum entanglement in simple terms"],
            ],
            inputs=[user_msg],
        )
if __name__ == "__main__":
    demo.queue(max_size=50).launch()
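
# Note: launch() binds to localhost by default; passing
# server_name="0.0.0.0" (a standard Gradio option) exposes it on the network.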