Fadri committed · Commit c731f9b · verified · 1 Parent(s): dd241eb

Update app.py

Files changed (1):
  1. app.py +12 -6
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 from transformers import pipeline
 
 # Load models
-vit_classifier = pipeline("image-classification", model="LukeXOTWOD/vit-base-oxford-iiit-pets")
+vit_classifier = pipeline("image-classification", model="Fadri/vit-base-oxford-iiit-pets")
 clip_detector = pipeline(model="openai/clip-vit-large-patch14", task="zero-shot-image-classification")
 
 labels_oxford_pets = [
@@ -20,17 +20,23 @@ def classify_pet(image):
     clip_results = clip_detector(image, candidate_labels=labels_oxford_pets)
     clip_output = {result['label']: result['score'] for result in clip_results}
 
-    return {
-        "ViT Transfer Learning Model": vit_output,
-        "CLIP Zero-Shot Model": clip_output
-    }
+    return {"ViT Classification": vit_output, "CLIP Zero-Shot Classification": clip_output}
+
+example_images = [
+    ["example_images/dog1.jpeg"],
+    ["example_images/dog2.jpeg"],
+    ["example_images/leonberger.jpg"],
+    ["example_images/snow_leopard.jpeg"],
+    ["example_images/cat.jpg"]
+]
 
 iface = gr.Interface(
     fn=classify_pet,
     inputs=gr.Image(type="filepath"),
     outputs=gr.JSON(),
     title="Pet Classification Comparison",
-    description="Upload an image of a pet, and compare predictions from a trained ViT model and a zero-shot CLIP model."
+    description="Upload an image of a pet, and compare results from a trained ViT model and a zero-shot CLIP model.",
+    examples=example_images
 )
 
 iface.launch()
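For context, a minimal sketch of what the committed code does when called directly, outside Gradio. Assumptions are flagged inline: the real labels_oxford_pets list is truncated in the diff, so the candidate labels below are hypothetical stand-ins, and the vit_output comprehension is reconstructed by analogy with the clip_output line shown above.

from transformers import pipeline

# Load the two models the Space compares; the ViT checkpoint is the one
# this commit switches to.
vit_classifier = pipeline("image-classification", model="Fadri/vit-base-oxford-iiit-pets")
clip_detector = pipeline(model="openai/clip-vit-large-patch14", task="zero-shot-image-classification")

# Hypothetical stand-in: the real labels_oxford_pets list is truncated in the diff.
labels_oxford_pets = ["Abyssinian", "Bengal", "beagle", "pug"]

# One of the example images this commit adds; both pipelines accept a file
# path, matching gr.Image(type="filepath"), which hands classify_pet a path string.
image_path = "example_images/dog1.jpeg"

vit_results = vit_classifier(image_path)
clip_results = clip_detector(image_path, candidate_labels=labels_oxford_pets)

# Each pipeline returns a list of {'label': ..., 'score': ...} dicts; flatten
# them to {label: score} the same way classify_pet does before returning.
vit_output = {r["label"]: r["score"] for r in vit_results}
clip_output = {r["label"]: r["score"] for r in clip_results}
print({"ViT Classification": vit_output, "CLIP Zero-Shot Classification": clip_output})

Note that examples is a list of one-element lists because Gradio expects one value per input component for each example row; with a single gr.Image input, each row holds just a file path.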