import gradio as gr
from transformers import pipeline, WhisperProcessor, WhisperForConditionalGeneration
import torch
import librosa

# Whisper checkpoint (kept for the speech components; not used by the image demo below)
checkpoint = "openai/whisper-base"
# checkpoint = "/innev/open-ai/huggingface/openai/whisper-base"

# Default pipelines: an image classifier for the demo and a text-to-speech pipeline
image_to_text_model = pipeline("image-classification")
text_to_audio_model = pipeline("text-to-speech")


def image_to_text(input_image):
    # Convert the image to text: return the top predicted label from the classifier
    text_output = image_to_text_model(input_image)[0]["label"]
    return text_output


with gr.Blocks() as demo:
    gr.Markdown("Upload an image below and then click **Run** to see the output.")
    with gr.Row():
        inp = gr.Image(type="pil")
        out = gr.Textbox(placeholder="The predicted label will appear here")
    run_button = gr.Button("Run")
    # Classify the uploaded image when the button is clicked
    run_button.click(fn=image_to_text, inputs=inp, outputs=out)

demo.launch()
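
# Note: text_to_audio_model is created above but not wired into the demo.
# A minimal sketch of how it could speak the predicted label (an illustration,
# not part of the original app; the exact audio shape depends on the default
# TTS model), kept commented out:
#
# def image_to_speech(input_image):
#     label = image_to_text(input_image)               # top predicted label
#     speech = text_to_audio_model(label)              # {"audio": ..., "sampling_rate": ...}
#     return speech["sampling_rate"], speech["audio"]  # e.g. for a gr.Audio output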