from transformers import pipeline
from PIL import Image

# Load the captioning pipeline once at import time rather than inside
# predict_step, so the model is not reloaded on every prediction.
image_to_text = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")


def predict_step(img_array):
    # Convert the incoming numpy array to a PIL image; the ViT encoder
    # expects three-channel RGB input.
    i_image = Image.fromarray(img_array)
    if i_image.mode != "RGB":
        i_image = i_image.convert(mode="RGB")
    prediction = image_to_text(i_image)
    return prediction[0]["generated_text"]
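

# A minimal sketch of how predict_step could be wired into a web demo,
# assuming this Space uses Gradio (the UI setup is not shown in this file).
# gr.Image hands the upload to the function as a numpy array, which is
# what the img_array parameter above expects.
#
# import gradio as gr
#
# demo = gr.Interface(fn=predict_step, inputs=gr.Image(), outputs="text")
# demo.launch()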

# Lower-level equivalent of the pipeline above, calling the model,
# image processor, and tokenizer directly (same nlpconnect checkpoint):
#
# from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
# from PIL import Image
#
# model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
# feature_extractor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
# tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
#
# max_length = 16
# num_beams = 4
# gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
#
#
# def predict_step(img_array):
#     i_image = Image.fromarray(img_array)
#
#     if i_image.mode != "RGB":
#         i_image = i_image.convert(mode="RGB")
#
#     pixel_values = feature_extractor(images=i_image, return_tensors="pt", do_normalize=True).pixel_values
#
#     # Generate caption token ids with beam search, then decode to text.
#     output_ids = model.generate(pixel_values, **gen_kwargs)
#
#     pred = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
#     pred = [p.strip() for p in pred]
#     return pred