SandraCLV committed
Commit 460a57d · 1 Parent(s): be8da16

Update app.py

Files changed (1)
  1. app.py +8 -8
app.py CHANGED
@@ -5,19 +5,19 @@ import librosa
 
 checkpoint = "openai/whisper-base"
 # checkpoint = "/innev/open-ai/huggingface/openai/whisper-base"
-processor = WhisperProcessor.from_pretrained(checkpoint)
-model = WhisperForConditionalGeneration.from_pretrained(checkpoint)
-text_Interface=gr.Interface.load("models/nlpconnect/vit-gpt2-image-captioning")
+image_to_text_model = pipeline("image-classification")
+text_to_audio_model = pipeline("text-to-speech")
 
-def greet():
-    return "Hello "
+def image_to_text(input_image):
+    # Convert the image to text
+    text_output = image_to_text_model(input_image)[0]['label']
+    return text_output
 
 with gr.Blocks() as demo:
     gr.Markdown("Start typing below and then click **Run** to see the output.")
     with gr.Row():
         inp = gr.Image(type='pil')
-        out = gr.Textbox()
-    gr.Interface(fn=greet, inputs=inp, outputs=out)
+        out = gr.Textbox(placeholder=image_to_text(inp))
+    gr.Interface(fn=image_to_text, inputs=inp, outputs=[out])
 
 demo.launch()
-text_Interface.launch()
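
For reference, the new version as committed may not run as-is: `pipeline` does not appear to be imported in the lines above the hunk, `text_to_audio_model` is created but never used, and `image_to_text(inp)` is evaluated while the UI is being built, when `inp` is still a `gr.Image` component rather than a PIL image. Below is a minimal sketch of the same idea that does run, assuming `gradio` and `transformers` are installed and keeping the commit's model choices; the explicit "Run" button is an addition, not part of the commit.

import gradio as gr
from transformers import pipeline

# Mirrors the commit: an image-classification pipeline used as a crude image-to-text step.
image_to_text_model = pipeline("image-classification")

def image_to_text(input_image):
    # Convert the image to text: return the top predicted label
    return image_to_text_model(input_image)[0]["label"]

with gr.Blocks() as demo:
    gr.Markdown("Start typing below and then click **Run** to see the output.")
    with gr.Row():
        inp = gr.Image(type="pil")
        out = gr.Textbox()
    # Wire the function to a button click instead of calling image_to_text(inp)
    # at build time, when inp holds no image yet.
    btn = gr.Button("Run")
    btn.click(fn=image_to_text, inputs=inp, outputs=out)

demo.launch()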