# Qwen-VL / app.py
import gradio as gr
import spaces
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch
from PIL import Image
import subprocess
from datetime import datetime
import numpy as np
import os
# Install flash-attn (FLASH_ATTENTION_SKIP_CUDA_BUILD skips compiling the CUDA kernels at install time)
subprocess.run('pip install flash-attn --no-build-isolation', env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
# Model and processor loading (done once at startup)
MODEL_ID = "Qwen/Qwen2-VL-7B-Instruct"
model = Qwen2VLForConditionalGeneration.from_pretrained(
    MODEL_ID, trust_remote_code=True, attn_implementation="flash_attention_2", torch_dtype="auto"
).cuda().eval()
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
DESCRIPTION = "[Qwen2-VL-7B Demo](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct)"
# Helper: persist the uploaded media and return an absolute path for the chat message
def save_media_and_get_path(media, media_type):
    if media is None:
        gr.Warning(f"No {media_type} provided. Please upload a {media_type} before submitting.")
        raise ValueError(f"No {media_type} provided.")
    if media_type == "video":
        # gr.Video already hands over a file path string, so reuse it directly
        return os.path.abspath(media)
    # gr.Image(type="pil") hands over a PIL image, so save it to disk first
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"image_{timestamp}.png"
    media.save(filename)
    return os.path.abspath(filename)
@spaces.GPU
def qwen_inference(media, media_type, text_input=None):
    media_path = save_media_and_get_path(media, media_type)

    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": media_type,
                    media_type: media_path,
                    **({"max_pixels": 360 * 420, "fps": 6.0} if media_type == "video" else {}),
                },
                {"type": "text", "text": text_input},
            ],
        }
    ]
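    # The dicts above follow the Qwen2-VL chat message format: a media entry of the form
    # {"type": "image" | "video", "image"/"video": <path>}, where the optional "max_pixels"
    # and "fps" hints are used by qwen_vl_utils' process_vision_info when preparing pixel inputs.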
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    ).to("cuda")

    generated_ids = model.generate(**inputs, max_new_tokens=1024)
    # Drop the prompt tokens so only the newly generated answer is decoded
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]

    return output_text
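
# For illustration only (hypothetical, not executed by the app): calling the inference
# helper directly with a PIL image would look roughly like this.
#
#   from PIL import Image
#   img = Image.open("example.png")            # any local image file
#   print(qwen_inference(img, "image", "Describe this picture."))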
css = """
#output {
height: 500px;
overflow: auto;
border: 1px solid #ccc;
}
"""
with gr.Blocks(css=css) as demo:
    gr.Markdown(DESCRIPTION)

    with gr.Tab(label="Image Input"):
        with gr.Row():
            with gr.Column():
                input_img = gr.Image(label="Input Picture", type="pil")
                text_input_image = gr.Textbox(label="Question")
                submit_btn_image = gr.Button(value="Submit")
            with gr.Column():
                output_text_image = gr.Textbox(label="Output Text")
        # Gradio event inputs must be components, so the fixed media_type is bound via a lambda
        submit_btn_image.click(
            lambda media, text: qwen_inference(media, "image", text),
            [input_img, text_input_image],
            [output_text_image],
        )

    with gr.Tab(label="Video Input"):
        with gr.Row():
            with gr.Column():
                input_video = gr.Video(label="Input Video")
                text_input_video = gr.Textbox(label="Question")
                submit_btn_video = gr.Button(value="Submit")
            with gr.Column():
                output_text_video = gr.Textbox(label="Output Text")
        submit_btn_video.click(
            lambda media, text: qwen_inference(media, "video", text),
            [input_video, text_input_video],
            [output_text_video],
        )

demo.queue(api_open=False)
demo.launch(debug=True)
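
# To run this demo locally (a sketch, assuming a CUDA GPU is available):
#   pip install gradio spaces transformers qwen-vl-utils torch pillow
#   python app.py
# flash-attn is installed by the script itself at startup (see the top of the file).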