# app.py
# Minimal Gradio chat front-end for the Google Gemini API (google-genai SDK).
import os
import gradio as gr
from google import genai

# Read API key from env (set this as a Space secret on HF: GEMINI_API_KEY)
# NOTE(review): os.environ.get returns None when unset — genai.Client will then
# fail at request time, not here; the error surfaces via call_gemini's fallback.
api_key = os.environ.get("GEMINI_API_KEY")
client = genai.Client(api_key=api_key)
# Model name is overridable via env; defaults to a fast Gemini variant.
MODEL = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash")

def call_gemini(prompt: str) -> str:
    """Send *prompt* to Gemini and return the textual reply.

    Never raises: any API failure is folded into a bracketed error
    string so the chat UI keeps working instead of crashing.
    """
    try:
        result = client.models.generate_content(
            model=MODEL,
            contents=prompt,
        )
        # The SDK normally exposes the reply as .text; fall back to str().
        return getattr(result, "text", str(result))
    except Exception as exc:
        return f"[Error calling Gemini API: {exc}]"


def generate_reply(message: str, history: list) -> list:
    """Append the user's message and Gemini's reply to the conversation.

    Parameters
    ----------
    message : str | None
        Raw user input. ``None``, empty, or whitespace-only input is a
        no-op (the conversation is returned unchanged).
    history : list | None
        Conversation so far as ``{"role", "content"}`` dicts in Gradio
        'messages' format; ``None`` means an empty conversation.

    Returns
    -------
    list
        A *new* list with the user turn and assistant turn appended.
        The input list is never mutated — Gradio wires the same list
        object into both the Chatbot and State outputs, so in-place
        mutation would alias component state.
    """
    # Copy defensively; also normalizes None -> [].
    history = list(history) if history else []

    # Guard against None as well as blank input (Gradio can submit None,
    # and str.strip() on None would raise AttributeError).
    user_message = (message or "").strip()
    if not user_message:
        return history

    # Record the user turn (messages format).
    history.append({"role": "user", "content": user_message})

    # Single-turn prompt: only the current message is sent. To give the
    # model multi-turn context, join history into one prompt instead.
    reply_text = call_gemini(user_message)

    # Record the assistant turn.
    history.append({"role": "assistant", "content": reply_text})
    return history


# UI layout and event wiring. `demo` is the app object launched below.
with gr.Blocks(title="Gemini Chatbot") as demo:
    gr.Markdown("# Gemini Chatbot (Gradio — messages format)")

    # Use the new 'messages' type so Gradio won't warn about tuples->messages deprecation
    chatbot = gr.Chatbot(label="Gemini", type="messages")
    state = gr.State([])  # will hold the list of {"role","content"} dicts

    with gr.Row():
        txt = gr.Textbox(
            show_label=False,
            placeholder="Type your message and press Enter...",
            lines=1,
        )

    # Event handler: takes (textbox value, current state), returns the
    # updated messages list for both the Chatbot display and the State.
    def user_submit(message, history):
        # Ensure history is a list
        if history is None:
            history = []
        updated = generate_reply(message, history)
        # Gradio expects (chatbot, state) outputs; we return the updated messages list for both
        return updated, updated

    # Enter in the textbox triggers a Gemini round-trip.
    txt.submit(fn=user_submit, inputs=[txt, state], outputs=[chatbot, state])
    # Clear resets both the visible chat and the stored conversation;
    # queue=False runs it immediately without the request queue.
    gr.Button("Clear").click(lambda: ([], []), None, [chatbot, state], queue=False)

if __name__ == "__main__":
    # Bind to all interfaces; honor a PORT override (e.g. on hosting platforms).
    port = int(os.environ.get("PORT", 7860))
    demo.launch(server_name="0.0.0.0", server_port=port)