SandraCLV committed
Commit b62d206 · 1 Parent(s): cce26d4

Update app.py

Files changed (1)
  1. app.py +4 -23
app.py CHANGED
@@ -2,29 +2,10 @@ import gradio as gr
 from transformers import ImageClassificationPipeline, PerceiverForImageClassificationConvProcessing, PerceiverFeatureExtractor
 import torch
 
-torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
-torch.hub.download_url_to_file('https://storage.googleapis.com/perceiver_io/dalmation.jpg', 'dog.jpg')
-
-feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
-model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
-
-image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)
-
-def classify_image(image):
-    results = image_pipe(image)
-    # convert to format Gradio expects
-    output = {}
-    for prediction in results:
-        predicted_label = prediction['label']
-        score = prediction['score']
-        output[predicted_label] = score
-    return output
-
-image = gr.inputs.Image(type="pil")
-label = gr.outputs.Label(num_top_classes=5)
-examples = [["cats.jpg"], ["dog.jpg"]]
-title = "Interactive demo: image classification to text and audio speech"
-description = "Demo for image classification to text and audio speech. To use it, simply upload an image or use the example images below and click 'submit' to let the model predict the 5 most probable ImageNet classes. Results will show up in a few seconds."
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2107.14795'>Perceiver IO: A General Architecture for Structured Inputs & Outputs</a> | <a href='https://deepmind.com/blog/article/building-architectures-that-can-handle-the-worlds-data/'>Official blog</a></p>"
-
-gr.Interface(fn=classify_image, inputs=image, outputs=label, title=title, description=description, examples=examples, enable_queue=True).launch(debug=True)
+def greet(name):
+    return "Hello " + name
+
+
+demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+
+demo.launch()
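
For reference, the commit replaces the Perceiver image-classification demo with a minimal Gradio text demo. A sketch of app.py as it stands after this change, reconstructed from the diff above (the unchanged context lines mean the now-unused transformers and torch imports stay in the file; blank-line placement follows the diff's line numbering):

import gradio as gr
from transformers import ImageClassificationPipeline, PerceiverForImageClassificationConvProcessing, PerceiverFeatureExtractor
import torch

# Minimal text-to-text demo: returns a greeting for the typed name.
def greet(name):
    return "Hello " + name


demo = gr.Interface(fn=greet, inputs="text", outputs="text")

demo.launch()

Since greet no longer touches transformers or torch, a follow-up commit could drop those two imports without changing behavior.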