import torch
from transformers import AutoModel, AutoTokenizer
from utils import load_image, load_video
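
# load_image and load_video are assumed to be the InternVL demo preprocessing helpers
# (image tiling and video frame sampling), copied into a local utils.py.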
if __name__ == "__main__":
    path, rev = 'morpheushoc/InternVL2_5-2B', 'main'
    # path = 'OpenGVLab/InternVL2_5-2B'
    model = AutoModel.from_pretrained(path,
                                      torch_dtype=torch.bfloat16,
                                      load_in_8bit=False,
                                      low_cpu_mem_usage=True,
                                      use_flash_attn=True,
                                      trust_remote_code=True,
                                      revision=rev).eval().cuda()
    tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
    generation_config = dict(max_new_tokens=1024, do_sample=False)
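
    # Mixed batch: three single-image prompts and one multi-frame video prompt.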
    paths = [
        'image1.jpg',
        'image1.jpg',
        'image2.jpg',
        'red-panda.mp4',
    ]
    questions = [
        'describe this image',
        'describe this image',
        'describe this image',
        'describe this video',
    ]
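
    # Preprocess each input and prepend the matching <image> placeholder(s) to its question.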
    pixel_values, num_patches_list = [], []
    for i, fp in enumerate(paths):
        if fp.endswith('mp4'):
            # Sample 8 frames from the video; max_num=1 keeps one tile per frame.
            pxl_val, num_patches = load_video(fp, num_segments=8, max_num=1)
            prefix = ''.join([f'Frame{j+1}: <image>\n' for j in range(len(num_patches))])
        else:
            # Tile the image into at most 12 patches.
            pxl_val = load_image(fp, max_num=12).to(torch.bfloat16)
            num_patches = [len(pxl_val)]
            prefix = '<image>\n'
        pixel_values.append(pxl_val)
        num_patches_list.append(num_patches)
        questions[i] = prefix + questions[i]
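
    # Concatenate all patch tensors and run the four prompts in one batched chat call.
    # num_patches_list holds a per-question list of patch counts; this fork's batch_chat
    # is assumed to accept such nested lists (needed for the multi-frame video prompt).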
    pixel_values = torch.cat(pixel_values).to(torch.bfloat16).cuda()
    response = model.batch_chat(tokenizer, pixel_values, questions, generation_config,
                                num_patches_list=num_patches_list, history=None, return_history=False)
    for q, r in zip(questions, response):
        print(f'User: {q}\nAssistant: {r}')
        print('\n')