import torch
from transformers import AutoModel, AutoTokenizer

from utils import load_image, load_video

if __name__ == "__main__":
    path, rev = 'morpheushoc/InternVL2_5-2B', 'main'
    # path = 'OpenGVLab/InternVL2_5-2B'
    model = AutoModel.from_pretrained(
        path,
        torch_dtype=torch.bfloat16,
        load_in_8bit=False,
        low_cpu_mem_usage=True,
        use_flash_attn=True,
        trust_remote_code=True,
        revision=rev,
    ).eval().cuda()
    tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
    generation_config = dict(max_new_tokens=1024, do_sample=False)

    paths = [
        'image1.jpg',
        'image1.jpg',
        'image2.jpg',
        'red-panda.mp4',
    ]
    questions = [
        'describe this image',
        'describe this image',
        'describe this image',
        'describe this video',
    ]

    # Preprocess each sample into ViT tiles and prepend the <image>
    # placeholders the chat template expects.
    pixel_values, num_patches_list = [], []
    for i, fp in enumerate(paths):
        if fp.endswith('.mp4'):
            # A video is sampled into frames; each frame gets its own
            # <image> placeholder.
            pxl_val, num_patches = load_video(fp, num_segments=8, max_num=1)
            prefix = ''.join(f'Frame{j + 1}: <image>\n' for j in range(len(num_patches)))
        else:
            pxl_val = load_image(fp, max_num=12).to(torch.bfloat16)
            num_patches = [len(pxl_val)]
            prefix = '<image>\n'
        pixel_values.append(pxl_val)
        # NOTE: each entry here is a per-sample list of tile counts (one count
        # per video frame, or a single count for an image). The stock OpenGVLab
        # batch_chat expects a flat list of ints, so this nested layout relies
        # on the customized remote code at this repo/revision.
        num_patches_list.append(num_patches)
        questions[i] = prefix + questions[i]
    pixel_values = torch.cat(pixel_values).to(torch.bfloat16).cuda()

    responses = model.batch_chat(tokenizer, pixel_values, questions, generation_config,
                                 num_patches_list=num_patches_list,
                                 history=None, return_history=False)
    for q, r in zip(questions, responses):
        print(f'User: {q}\nAssistant: {r}\n')
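

# ---------------------------------------------------------------------------
# `utils` module sketch
# ---------------------------------------------------------------------------
# The script above imports load_image / load_video from a local `utils` module
# that is not included here. The code below is a minimal stand-in, assuming
# the module follows the standard InternVL preprocessing recipe (dynamic
# aspect-ratio tiling into 448x448 crops, one ImageNet-normalized tensor per
# tile); the helpers in your actual checkout may differ, e.g. in how grid ties
# are broken. Save it as utils.py next to this script. Requires pillow,
# torchvision, and decord.

import torchvision.transforms as T
from decord import VideoReader, cpu
from PIL import Image
from torchvision.transforms.functional import InterpolationMode

IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)


def build_transform(input_size=448):
    # Convert to RGB, resize to a square tile, normalize with ImageNet stats.
    return T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ])


def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=True):
    # Pick the tiling grid (cols x rows, cols*rows <= max_num) whose aspect
    # ratio is closest to the input image (ties broken toward fewer tiles),
    # then crop the resized image into that many square tiles.
    orig_w, orig_h = image.size
    aspect = orig_w / orig_h
    grids = sorted(
        {(i, j) for n in range(min_num, max_num + 1)
         for i in range(1, n + 1) for j in range(1, n + 1)
         if min_num <= i * j <= max_num},
        key=lambda g: (abs(aspect - g[0] / g[1]), g[0] * g[1]))
    cols, rows = grids[0]
    resized = image.resize((image_size * cols, image_size * rows))
    tiles = [
        resized.crop((c * image_size, r * image_size,
                      (c + 1) * image_size, (r + 1) * image_size))
        for r in range(rows) for c in range(cols)
    ]
    if use_thumbnail and len(tiles) != 1:
        # Extra global thumbnail tile, as in the reference recipe.
        tiles.append(image.resize((image_size, image_size)))
    return tiles


def load_image(image_file, input_size=448, max_num=12):
    # Returns a (num_tiles, 3, input_size, input_size) float tensor.
    image = Image.open(image_file).convert('RGB')
    transform = build_transform(input_size)
    tiles = dynamic_preprocess(image, max_num=max_num, image_size=input_size)
    return torch.stack([transform(t) for t in tiles])


def load_video(video_path, input_size=448, max_num=1, num_segments=8):
    # Uniformly samples num_segments frames (midpoint of each segment) and
    # tiles each frame like an image. Returns the stacked tiles and the
    # per-frame tile counts.
    vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
    seg_size = len(vr) / num_segments
    frame_indices = [int(seg_size / 2 + seg_size * i) for i in range(num_segments)]
    transform = build_transform(input_size)
    pixel_values_list, num_patches_list = [], []
    for idx in frame_indices:
        frame = Image.fromarray(vr[idx].asnumpy()).convert('RGB')
        tiles = dynamic_preprocess(frame, max_num=max_num, image_size=input_size)
        tile_tensor = torch.stack([transform(t) for t in tiles])
        pixel_values_list.append(tile_tensor)
        num_patches_list.append(tile_tensor.shape[0])
    return torch.cat(pixel_values_list), num_patches_list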