#!/usr/bin/env python

import os
import re
import tempfile
from collections.abc import Iterator
from threading import Thread

import cv2
import gradio as gr
import spaces
import torch
from loguru import logger
from PIL import Image
from transformers import AutoProcessor, Gemma3ForConditionalGeneration, TextIteratorStreamer

# ──────────────────────────────────────────────────────────────────────────────
# MODEL
# ──────────────────────────────────────────────────────────────────────────────
model_id = os.getenv("MODEL_ID", "rmdhirr/gemma-dpo-model-170")
processor = AutoProcessor.from_pretrained(model_id, padding_side="left")
model = Gemma3ForConditionalGeneration.from_pretrained(
    model_id, device_map="auto", torch_dtype=torch.bfloat16, attn_implementation="eager"
)
model.eval()
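# Note: device_map="auto" lets accelerate place the weights across available
# devices, and attn_implementation="eager" avoids an optional flash-attention
# dependency at some speed cost.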

MAX_NUM_IMAGES = int(os.getenv("MAX_NUM_IMAGES", "5"))

# ──────────────────────────────────────────────────────────────────────────────
# HELPERS
# ──────────────────────────────────────────────────────────────────────────────
def count_files_in_new_message(paths: list[str]) -> tuple[int, int]:
    image_count = 0
    video_count = 0
    for path in paths:
        if path.endswith(".mp4"):
            video_count += 1
        else:
            image_count += 1
    return image_count, video_count

def count_files_in_history(history: list[dict]) -> tuple[int, int]:
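    # In Gradio's "messages"-format history, text turns carry a plain string in
    # "content", while uploaded files arrive as a (filepath,) tuple; hence the
    # content[0] lookup below.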
    image_count = 0
    video_count = 0
    for item in history:
        if item["role"] != "user" or isinstance(item["content"], str):
            continue
        if item["content"][0].endswith(".mp4"):
            video_count += 1
        else:
            image_count += 1
    return image_count, video_count

def validate_media_constraints(message: dict, history: list[dict]) -> bool:
    new_image_count, new_video_count = count_files_in_new_message(message["files"])
    history_image_count, history_video_count = count_files_in_history(history)
    image_count = history_image_count + new_image_count
    video_count = history_video_count + new_video_count
    if video_count > 1:
        gr.Warning("Only one video is supported.")
        return False
    if video_count == 1:
        if image_count > 0:
            gr.Warning("Mixing images and videos is not allowed.")
            return False
        if "<image>" in message["text"]:
            gr.Warning("Using <image> tags with video files is not supported.")
            return False
    if video_count == 0 and image_count > MAX_NUM_IMAGES:
        gr.Warning(f"You can upload up to {MAX_NUM_IMAGES} images.")
        return False
    if "<image>" in message["text"] and message["text"].count("<image>") != new_image_count:
        gr.Warning("The number of <image> tags in the text does not match the number of images.")
        return False
    return True

def downsample_video(video_path: str) -> list[tuple[Image.Image, float]]:
    vidcap = cv2.VideoCapture(video_path)
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_interval = max(total_frames // MAX_NUM_IMAGES, 1)
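    # Worked example: a 300-frame clip with MAX_NUM_IMAGES=5 gives
    # frame_interval=60, so frames 0, 60, 120, 180 and 240 are sampled.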
    frames: list[tuple[Image.Image, float]] = []
    for i in range(0, min(total_frames, MAX_NUM_IMAGES * frame_interval), frame_interval):
        if len(frames) >= MAX_NUM_IMAGES:
            break
        vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)
        success, image = vidcap.read()
        if success:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            pil_image = Image.fromarray(image)
            timestamp = round(i / fps, 2) if fps else 0.0
            frames.append((pil_image, timestamp))
    vidcap.release()
    return frames

def process_video(video_path: str) -> list[dict]:
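    # Sampled frames are persisted as temp PNGs (delete=False) so their paths
    # remain valid when the processor later loads them from the "url" fields.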
    content = []
    frames = downsample_video(video_path)
    for pil_image, timestamp in frames:
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
            pil_image.save(temp_file.name)
            content.append({"type": "text", "text": f"Frame {timestamp}:"})
            content.append({"type": "image", "url": temp_file.name})
    logger.debug(f"{content=}")
    return content

def process_interleaved_images(message: dict) -> list[dict]:
    logger.debug(f"{message['files']=}")
    parts = re.split(r"(<image>)", message["text"])
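    # re.split with a capturing group keeps the delimiters, e.g.
    # re.split(r"(<image>)", "a <image> b") -> ['a ', '<image>', ' b']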
    logger.debug(f"{parts=}")
    content = []
    image_index = 0
    for part in parts:
        if part == "<image>":
            content.append({"type": "image", "url": message["files"][image_index]})
            logger.debug(f"file: {message['files'][image_index]}")
            image_index += 1
        elif part.strip():
            # re.split always yields strings, so no isinstance check is needed;
            # empty and whitespace-only fragments between delimiters are skipped.
            content.append({"type": "text", "text": part.strip()})
    logger.debug(f"{content=}")
    return content

def process_new_user_message(message: dict) -> list[dict]:
    if not message["files"]:
        return [{"type": "text", "text": message["text"]}]
    if message["files"][0].endswith(".mp4"):
        return [{"type": "text", "text": message["text"]}, *process_video(message["files"][0])]
    if "<image>" in message["text"]:
        return process_interleaved_images(message)
    return [{"type": "text", "text": message["text"]}, *[{"type": "image", "url": p} for p in message["files"]]]

def process_history(history: list[dict]) -> list[dict]:
    messages = []
    current_user_content: list[dict] = []
    for item in history:
        if item["role"] == "assistant":
            if current_user_content:
                messages.append({"role": "user", "content": current_user_content})
                current_user_content = []
            messages.append({"role": "assistant", "content": [{"type": "text", "text": item["content"]}]})
        else:
            content = item["content"]
            if isinstance(content, str):
                current_user_content.append({"type": "text", "text": content})
            else:
                current_user_content.append({"type": "image", "url": content[0]})
    return messages
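
# For example, a history of
#   [{"role": "user", "content": ("cat.png",)},
#    {"role": "user", "content": "What is this?"},
#    {"role": "assistant", "content": "A cat."}]
# becomes one user message holding an image part and a text part, followed by
# one assistant message holding a single text part.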

# ──────────────────────────────────────────────────────────────────────────────
# GENERATION
# ──────────────────────────────────────────────────────────────────────────────
@spaces.GPU(duration=120)
def run(message: dict, history: list[dict], system_prompt: str = "", max_new_tokens: int = 512) -> Iterator[str]:
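    """Stream a reply to the newest user message.

    Yields the accumulated text after each decoded chunk so Gradio re-renders
    the growing assistant message in place.
    """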
    if not validate_media_constraints(message, history):
        yield ""
        return

    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": [{"type": "text", "text": system_prompt}]})
    messages.extend(process_history(history))
    messages.append({"role": "user", "content": process_new_user_message(message)})

    inputs = processor.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(device=model.device, dtype=torch.bfloat16)
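    # BatchFeature.to applies the dtype only to floating-point tensors (e.g.
    # pixel_values); integer tensors such as input_ids keep their dtype.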

    # TextIteratorStreamer wants a tokenizer-like object
    tokenizer_for_stream = getattr(processor, "tokenizer", processor)

    streamer = TextIteratorStreamer(
        tokenizer_for_stream, timeout=30.0, skip_prompt=True, skip_special_tokens=True
    )

    generate_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        disable_compile=True,
    )

    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()
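
    # generate() runs on the background thread; iterating the streamer below
    # blocks until each new chunk arrives (or the 30 s queue timeout fires).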

    output = ""
    for delta in streamer:
        output += delta
        yield output

# ──────────────────────────────────────────────────────────────────────────────
# EXAMPLES + DESCRIPTION
# ──────────────────────────────────────────────────────────────────────────────
examples = [
    [{"text": "I need to be in Japan for 10 days, going to Tokyo, Kyoto and Osaka. Think about number of attractions in each of them and allocate number of days to each city. Make public transport recommendations.", "files": []}],
    [{"text": "Write the matplotlib code to generate the same bar chart.", "files": ["assets/additional-examples/barchart.png"]}],
    [{"text": "What is odd about this video?", "files": ["assets/additional-examples/tmp.mp4"]}],
    [{"text": "I already have this supplement <image> and I want to buy this one <image>. Any warnings I should know about?", "files": ["assets/additional-examples/pill1.png", "assets/additional-examples/pill2.png"]}],
    [{"text": "Write a poem inspired by the visual elements of the images.", "files": ["assets/sample-images/06-1.png", "assets/sample-images/06-2.png"]}],
    [{"text": "Compose a short musical piece inspired by the visual elements of the images.", "files": ["assets/sample-images/07-1.png", "assets/sample-images/07-2.png", "assets/sample-images/07-3.png", "assets/sample-images/07-4.png"]}],
    [{"text": "Write a short story about what might have happened in this house.", "files": ["assets/sample-images/08.png"]}],
    [{"text": "Create a short story based on the sequence of images.", "files": ["assets/sample-images/09-1.png", "assets/sample-images/09-2.png", "assets/sample-images/09-3.png", "assets/sample-images/09-4.png", "assets/sample-images/09-5.png"]}],
    [{"text": "Describe the creatures that would live in this world.", "files": ["assets/sample-images/10.png"]}],
    [{"text": "Read text in the image.", "files": ["assets/additional-examples/1.png"]}],
    [{"text": "When is this ticket dated and how much did it cost?", "files": ["assets/additional-examples/2.png"]}],
    [{"text": "Read the text in the image into markdown.", "files": ["assets/additional-examples/3.png"]}],
    [{"text": "Evaluate this integral.", "files": ["assets/additional-examples/4.png"]}],
    [{"text": "caption this image", "files": ["assets/sample-images/01.png"]}],
    [{"text": "What's the sign says?", "files": ["assets/sample-images/02.png"]}],
    [{"text": "Compare and contrast the two images.", "files": ["assets/sample-images/03.png"]}],
    [{"text": "List all the objects in the image and their colors.", "files": ["assets/sample-images/04.png"]}],
    [{"text": "Describe the atmosphere of the scene.", "files": ["assets/sample-images/05.png"]}],
]

DESCRIPTION = """\
<img src='https://huggingface.co/spaces/huggingface-projects/gemma-3-12b-it/resolve/main/assets/logo.png' id='logo' />
This is a demo of Gemma 3 12B IT, a vision language model with outstanding performance on a wide range of tasks.
You can upload images, interleaved images, and videos. Note that video input supports only single-turn conversations and .mp4 files.
"""

# ──────────────────────────────────────────────────────────────────────────────
# UI (keeps the ChatInterface layout and adds a tiny per-bubble copy icon)
# Using Blocks so we can inject JS while mimicking the original layout (title/description/inputs).
# ──────────────────────────────────────────────────────────────────────────────
with gr.Blocks(
    css="""
/* Make message container allow a tiny button outside the bubble edge */
#chat-root [data-testid*="message"],
#chat-root .message { position: relative; overflow: visible; }

/* Tiny circular copy icon, bottom-right, slightly outside so it doesn't cover text */
.bubble-copy{
  position:absolute;
  bottom:-0.35rem;   /* sit just outside the bubble */
  right:-0.35rem;
  width:22px; height:22px;
  display:flex; align-items:center; justify-content:center;
  border-radius:9999px;
  border:1px solid rgba(0,0,0,.15);
  background:rgba(255,255,255,.96);
  box-shadow:0 1px 2px rgba(0,0,0,.10);
  font-size:12px; line-height:1; padding:0;
  cursor:pointer; opacity:.85;
}
.bubble-copy:hover{ opacity:1; }

/* Optional: tighten spacing a bit on additional inputs */
#extra-controls .wrap { gap: .5rem; }
"""
) as demo:

    # Render the title/description on top to avoid footer placement, matching the original layout.
    gr.Markdown("# Gemma 3 12B IT")
    gr.Markdown(DESCRIPTION)

    chat = gr.ChatInterface(
        fn=run,
        type="messages",
        chatbot=gr.Chatbot(type="messages", scale=1, allow_tags=["image"], elem_id="chat-root"),
        textbox=gr.MultimodalTextbox(file_types=["image", ".mp4"], file_count="multiple", autofocus=True),
        multimodal=True,
        additional_inputs=[
            gr.Textbox(label="System Prompt", value="You are a helpful assistant.", elem_id="sys-prompt"),
            gr.Slider(label="Max New Tokens", minimum=100, maximum=2000, step=10, value=700, elem_id="max-toks"),
        ],
        stop_btn=False,
        # Avoid ChatInterface.title to prevent footer placement; we show header above instead.
        # title="Gemma 3 12B IT",
        # description=DESCRIPTION,
        examples=examples,
        run_examples_on_click=False,
        cache_examples=False,
        css_paths="style.css",
        delete_cache=(1800, 1800),
    )

    # Inject a small bottom-right copy icon into every assistant bubble
    demo.load(
        fn=None, inputs=None, outputs=None,
        js=r"""
() => {
  const root = document.querySelector('#chat-root');
  if (!root) return;

  // Minimal HTML → Markdown converter that preserves code fences.
  const toMarkdown = (node) => {
    if (node.nodeType === Node.TEXT_NODE) return node.nodeValue.replace(/\s+/g,' ');
    if (node.nodeType !== Node.ELEMENT_NODE) return '';
    const tag = node.tagName?.toLowerCase?.() || '';
    const kids = () => Array.from(node.childNodes).map(toMarkdown).join('');
    switch (tag) {
      case 'strong': case 'b': return '**' + kids().trim() + '**';
      case 'em': case 'i': return '*' + kids().trim() + '*';
      case 'code':
        if (node.parentElement && node.parentElement.tagName.toLowerCase()==='pre') return kids();
        return '`' + kids().trim() + '`';
      case 'pre': {
        const code = node.querySelector('code');
        const content = code ? code.textContent : node.textContent;
        return '\n```\n' + (content || '').replace(/\n+$/,'') + '\n```\n';
      }
      case 'br': return '  \n';
      case 'p': return kids().trim() + '\n\n';
      case 'ul': { let out=''; node.querySelectorAll(':scope>li').forEach(li=>{
        const m = toMarkdown(li).trim(); out += (m.startsWith('- ')?m:'- '+m)+'\n';
      }); return out+'\n'; }
      case 'ol': { let out='',i=1; node.querySelectorAll(':scope>li').forEach(li=>{
        out += (i++)+'. '+toMarkdown(li).trim()+'\n';
      }); return out+'\n'; }
      case 'li': {
        let parts=''; Array.from(node.childNodes).forEach(ch=>{
          const md = toMarkdown(ch); parts += md;
          if (ch.tagName && /ul|ol/i.test(ch.tagName)) parts += '\n';
        }); return parts.trim();
      }
      case 'a': { const href=node.getAttribute('href')||''; const text=kids().trim()||href; return `[${text}](${href})`; }
      case 'img': { const alt=node.getAttribute('alt')||''; const src=node.getAttribute('src')||''; return `![${alt}](${src})`; }
      case 'blockquote': return '> '+kids().trim().replace(/\n/g,'\n> ')+'\n\n';
      case 'hr': return '\n---\n';
      case 'h1': return '# '+kids().trim()+'\n\n';
      case 'h2': return '## '+kids().trim()+'\n\n';
      case 'h3': return '### '+kids().trim()+'\n\n';
      case 'h4': return '#### '+kids().trim()+'\n\n';
      case 'h5': return '##### '+kids().trim()+'\n\n';
      case 'h6': return '###### '+kids().trim()+'\n\n';
      default: return kids();
    }
  };

  const addCopyButtons = () => {
    const bots = root.querySelectorAll(
      '[data-testid="chatbot-message-bot"], [data-testid="bot"], .message.bot, .wrap.bot'
    );
    bots.forEach(msg => {
      if (msg.querySelector('.bubble-copy')) return;
      if (getComputedStyle(msg).position === 'static') msg.style.position = 'relative';
      const btn = document.createElement('button');
      btn.className = 'bubble-copy';
      btn.title = 'Copy as Markdown';
      btn.setAttribute('aria-label', 'Copy message');
      btn.textContent = '📋'; // tiny clipboard icon
      btn.addEventListener('click', (e) => {
        e.stopPropagation();
        const container = document.createElement('div');
        container.innerHTML = msg.innerHTML;
        let markdown = Array.from(container.childNodes).map(toMarkdown).join('')
          .replace(/[ \t]+\n/g,'\n')
          .replace(/\n{3,}/g,'\n\n')
          .trim();
        const items = {};
        const html = msg.innerHTML;
        if (html && window.Blob) items['text/html'] = new Blob([html], {type:'text/html'});
        items['text/plain'] = new Blob([markdown], {type:'text/plain'});
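        // Prefer the async Clipboard API with rich text/html + text/plain,
        // then plain writeText, then the legacy execCommand fallback.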
        if (navigator.clipboard && window.ClipboardItem) {
          navigator.clipboard.write([new ClipboardItem(items)]).catch(()=>{});
        } else if (navigator.clipboard && navigator.clipboard.writeText) {
          navigator.clipboard.writeText(markdown).catch(()=>{});
        } else {
          const ta=document.createElement('textarea');
          ta.value=markdown; ta.style.position='fixed'; ta.style.opacity='0';
          document.body.appendChild(ta); ta.select();
          try{ document.execCommand('copy'); }catch(e){}
          document.body.removeChild(ta);
        }
      });
      msg.appendChild(btn);
    });
  };

  addCopyButtons();
  const obs = new MutationObserver(() => addCopyButtons());
  obs.observe(root, { childList: true, subtree: true });
}
"""
    )

# ──────────────────────────────────────────────────────────────────────────────
# LAUNCH
# ──────────────────────────────────────────────────────────────────────────────
if __name__ == "__main__":
    demo.launch()
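
# To point the demo at a different checkpoint, set MODEL_ID before launching,
# e.g. (assuming access to the checkpoint):
#   MODEL_ID=google/gemma-3-12b-it python app.py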