prithivMLmods committed (verified)
Commit 7c0a5ab · Parent(s): 66f90ea

Update app.py

Files changed (1): app.py (+19 -2)
app.py CHANGED
@@ -47,6 +47,15 @@ model_x = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     torch_dtype=torch.float16
 ).to(device).eval()
 
+# Load Qwen2.5-VL-7B-Abliterated-Caption-it
+MODEL_ID_Q = "prithivMLmods/Qwen2.5-VL-7B-Abliterated-Caption-it"
+processor_q = AutoProcessor.from_pretrained(MODEL_ID_Q, trust_remote_code=True)
+model_q = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+    MODEL_ID_Q,
+    trust_remote_code=True,
+    torch_dtype=torch.float16
+).to(device).eval()
+
 def downsample_video(video_path):
     """
     Downsamples the video to evenly spaced frames.
@@ -85,6 +94,9 @@ def generate_image(model_name: str, text: str, image: Image.Image,
     elif model_name == "Qwen2.5-VL-3B-Instruct":
         processor = processor_x
         model = model_x
+    elif model_name == "Qwen2.5-VL-7B-Abliterated-Caption-it":
+        processor = processor_q
+        model = model_q
     else:
         yield "Invalid model selected.", "Invalid model selected."
         return
@@ -136,6 +148,9 @@ def generate_video(model_name: str, text: str, video_path: str,
     elif model_name == "Qwen2.5-VL-3B-Instruct":
         processor = processor_x
         model = model_x
+    elif model_name == "Qwen2.5-VL-7B-Abliterated-Caption-it":
+        processor = processor_q
+        model = model_q
     else:
         yield "Invalid model selected.", "Invalid model selected."
         return
@@ -183,8 +198,9 @@ def generate_video(model_name: str, text: str, video_path: str,
 
 # Define examples for image and video inference
 image_examples = [
+    ["Explain the pie-chart in detail.", "images/2.jpg"],
     ["Jsonify Data.", "images/1.jpg"],
-    ["Explain the pie-chart in detail.", "images/2.jpg"]
+    ["Provide a detailed caption for the image..", "images/A.jpg"]
 ]
 
 video_examples = [
@@ -246,13 +262,14 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
     markdown_output = gr.Markdown()
 
     model_choice = gr.Radio(
-        choices=["Qwen2.5-VL-7B-Instruct", "Qwen2.5-VL-3B-Instruct"],
+        choices=["Qwen2.5-VL-7B-Instruct", "Qwen2.5-VL-3B-Instruct", "Qwen2.5-VL-7B-Abliterated-Caption-it"],
         label="Select Model",
         value="Qwen2.5-VL-7B-Instruct"
     )
     gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Qwen2.5-VL/discussions)")
     gr.Markdown("> [Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct): The Qwen2.5-VL-7B-Instruct model is a multimodal AI model developed by Alibaba Cloud that excels at understanding both text and images. It's a Vision-Language Model (VLM) designed to handle various visual understanding tasks, including image understanding, video analysis, and even multilingual support.")
     gr.Markdown("> [Qwen2.5-VL-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct): Qwen2.5-VL-3B-Instruct is an instruction-tuned vision-language model from Alibaba Cloud, built upon the Qwen2-VL series. It excels at understanding and generating text related to both visual and textual inputs, making it capable of tasks like image captioning, visual question answering, and object localization. The model also supports long video understanding and structured data extraction")
+    gr.Markdown("> [Qwen2.5-VL-7B-Abliterated-Caption-it](https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct): Qwen2.5-VL-7B-Abliterated-Caption-it is a fine-tuned version of Qwen2.5-VL-7B-Instruct, optimized for Abliterated Captioning / Uncensored Captioning. This model excels at generating detailed, context-rich, and high-fidelity captions across diverse image categories and variational aspect ratios, offering robust visual understanding without filtering or censorship.")
     gr.Markdown(">⚠️note: all the models in space are not guaranteed to perform well in video inference use cases.")
 
     image_submit.click(
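
For reference, here is a minimal standalone sketch (not part of this commit) of how the newly registered prithivMLmods/Qwen2.5-VL-7B-Abliterated-Caption-it checkpoint could be exercised outside the Space, mirroring the loading code added above. The prompt, image path, and generation settings are illustrative assumptions; in the Space itself, requests are routed through its generate_image()/generate_video() functions.

# Sketch only: single image-captioning call with the checkpoint added in this commit.
# The image path and prompt below are placeholders, not values from app.py.
import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

MODEL_ID_Q = "prithivMLmods/Qwen2.5-VL-7B-Abliterated-Caption-it"
device = "cuda" if torch.cuda.is_available() else "cpu"

processor_q = AutoProcessor.from_pretrained(MODEL_ID_Q, trust_remote_code=True)
model_q = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID_Q,
    trust_remote_code=True,
    torch_dtype=torch.float16
).to(device).eval()

image = Image.open("images/A.jpg").convert("RGB")  # placeholder image path
messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        {"type": "text", "text": "Provide a detailed caption for the image."},
    ],
}]

# Build the chat-formatted prompt and pack text + image into model inputs.
prompt = processor_q.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor_q(text=[prompt], images=[image], return_tensors="pt", padding=True).to(device)

with torch.inference_mode():
    output_ids = model_q.generate(**inputs, max_new_tokens=512)

# Drop the prompt tokens before decoding so only the generated caption remains.
trimmed = output_ids[:, inputs["input_ids"].shape[1]:]
print(processor_q.batch_decode(trimmed, skip_special_tokens=True)[0])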