SandraCLV committed on
Commit
ec574b7
·
1 Parent(s): e1f4cc6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -27
app.py CHANGED
@@ -1,33 +1,30 @@
1
- import os
2
- from gtts import gTTS
3
- from pdfminer.high_level import extract_text
4
  import gradio as gr
5
- import os
 
6
 
 
 
 
 
 
7
 
8
def pdf_to_text(file_obj):
    """Convert an uploaded PDF into spoken audio.

    Extracts the PDF's text layer with pdfminer, synthesizes it to
    speech with Google Text-to-Speech, writes the audio to disk, and
    returns the file path for the Gradio 'audio' output to play.

    Args:
        file_obj: Uploaded-file object with a ``.name`` attribute holding
            the on-disk path of the PDF (Gradio 'file' input).

    Returns:
        str: Path of the generated audio file.
    """
    text = extract_text(file_obj.name)
    speech = gTTS(text=text, lang='en', slow=False)
    # gTTS always emits MP3-encoded audio; name the output accordingly
    # instead of the misleading '.wav' extension used previously.
    out_path = 'test.mp3'
    speech.save(out_path)
    return out_path
13
 
 
 
 
 
 
 
 
 
 
14
 
15
# Example PDFs displayed under the interface; absolute paths so Gradio
# resolves them regardless of the working directory.
examples = [
    [os.path.abspath("short-pdf.pdf")],
    [os.path.abspath("long-pdf.pdf")],
]

# Build the UI: a single 'file' input feeding pdf_to_text, whose returned
# path is rendered by an 'audio' output component.
# User-facing copy fixed: "in audio speech" -> "into audio speech",
# "wait for compiling" -> "wait for processing",
# "for listing to" -> "for listening to".
iface = gr.Interface(
    fn=pdf_to_text,
    inputs='file',
    outputs='audio',
    title='PDF to Audio Application',
    description='A simple application to convert PDF files into audio speech. Upload your own file, or click one of the examples to load them.',
    article='''<div>
<p style="text-align: center"> All you need to do is to upload the pdf file and hit submit, then wait for processing. After that click on Play/Pause for listening to the audio. The audio is saved in a wav format.</p>
</div>''',
    examples=examples,
)

iface.launch()
 
 
 
 
1
  import gradio as gr
2
+ from transformers import ImageClassificationPipeline, PerceiverForImageClassificationConvProcessing, PerceiverFeatureExtractor
3
+ import torch
4
 
5
# Fetch the two demo images referenced by the examples gallery.
for _url, _fname in (
    ('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg'),
    ('https://storage.googleapis.com/perceiver_io/dalmation.jpg', 'dog.jpg'),
):
    torch.hub.download_url_to_file(_url, _fname)

# Load the Perceiver IO vision checkpoint once at startup and wrap it in
# an image-classification pipeline used by classify_image below.
_CHECKPOINT = "deepmind/vision-perceiver-conv"
feature_extractor = PerceiverFeatureExtractor.from_pretrained(_CHECKPOINT)
model = PerceiverForImageClassificationConvProcessing.from_pretrained(_CHECKPOINT)

image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)
 
 
 
 
12
 
13
def classify_image(image):
    """Classify an image with the Perceiver pipeline.

    Args:
        image: A PIL image, as delivered by the Gradio 'pil' image input.

    Returns:
        dict: Mapping of predicted label -> confidence score, the format
        expected by ``gr.outputs.Label``.
    """
    # The pipeline yields a list of {'label': ..., 'score': ...} dicts;
    # flatten it into the {label: score} mapping Gradio expects.
    return {prediction['label']: prediction['score']
            for prediction in image_pipe(image)}
22
 
23
# Demo copy shown in the UI.
title = "Interactive demo: Perceiver for image classification"
description = "Demo for classifying images with Perceiver IO. To use it, simply upload an image or use the example images below and click 'submit' to let the model predict the 5 most probable ImageNet classes. Results will show up in a few seconds."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2107.14795'>Perceiver IO: A General Architecture for Structured Inputs & Outputs</a> | <a href='https://deepmind.com/blog/article/building-architectures-that-can-handle-the-worlds-data/'>Official blog</a></p>"

# Input/output widgets and the clickable example images.
examples = [["cats.jpg"], ["dog.jpg"]]
image = gr.inputs.Image(type="pil")
label = gr.outputs.Label(num_top_classes=5)

# Wire everything together and start the app; debug=True surfaces
# tracebacks in the console while the demo runs.
gr.Interface(
    fn=classify_image,
    inputs=image,
    outputs=label,
    title=title,
    description=description,
    examples=examples,
    enable_queue=True,
).launch(debug=True)