yaya-sy committed on
Commit
5b822d2
·
verified ·
1 Parent(s): 0aeae25

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -88,7 +88,7 @@ def downsample_video(video_path):
88
  vidcap.release()
89
  return frames
90
 
91
- MODEL_ID = "kaamd/chtvctr" # "kaamd/chtvctr" # Alternatively: "Qwen/Qwen2.5-VL-3B-Instruct"
92
  processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True, min_pixels=256*28*28, max_pixels=1280*28*28)
93
  model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
94
  MODEL_ID,
@@ -191,11 +191,11 @@ def model_inference(input_dict, history):
191
 
192
  # Option 1: Use regular Interface with streaming (recommended)
193
  with gr.Blocks() as demo:
194
- gr.Markdown("# oolel-vision-experimental `@video-infer for video understanding`")
195
 
196
  chatbot = gr.Chatbot()
197
  msg = gr.MultimodalTextbox(
198
- label="Query Input",
199
  file_types=["image", "video"],
200
  file_count="multiple"
201
  )
 
88
  vidcap.release()
89
  return frames
90
 
91
+ MODEL_ID = "yaya-sy/chvtr" # "kaamd/chtvctr" # Alternatively: "Qwen/Qwen2.5-VL-3B-Instruct"
92
  processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True, min_pixels=256*28*28, max_pixels=1280*28*28)
93
  model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
94
  MODEL_ID,
 
191
 
192
  # Option 1: Use regular Interface with streaming (recommended)
193
  with gr.Blocks() as demo:
194
+ gr.Markdown("# Oolel`")
195
 
196
  chatbot = gr.Chatbot()
197
  msg = gr.MultimodalTextbox(
198
+ label="Your Request",
199
  file_types=["image", "video"],
200
  file_count="multiple"
201
  )