ryanramos committed
Commit bce74b5 · Parent(s): bbd9cd5

Update app.py

Files changed (1)
  1. app.py +4 -6
app.py CHANGED
@@ -19,8 +19,11 @@ image_transform = testing_image_transform()
 question_transform = ALBEFTextTransform(add_end_token=False)
 answer_transform = ALBEFTextTransform(do_pre_process=False)
 
+vqa_data = json.load(open(data_dir + "vqa_data.json", "r"))
 answer_list = json.load(open(data_dir + "answer_list.json", "r"))
 
+examples = [[data['image'], data['question']] for data in vqa_data]
+
 def infer(image, question):
     images = [image]
     image_input = [image_transform(image) for image in images]
@@ -49,12 +52,7 @@ demo = gr.Interface(
     fn=infer,
     inputs=[gr.Image(label='image', type='pil', image_mode='RGB'), gr.Text(label='question')],
     outputs=gr.Text(label='answer'),
-    # examples=[
-    #     ['vqav2.png', 'What sport is this?'],
-    #     ['vizwiz.jpeg', 'What piece of meat have I taken out of the freezer?'],
-    #     ['aqua.png', 'what does bol lean nonchalantly on'],
-    #     ['robotvqa.png', 'How many silver spoons are there?'],
-    # ]
+    examples=examples
 )
 
 demo.launch()
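
Note on the change: the hard-coded (commented-out) example list is replaced by examples loaded from vqa_data.json. That file is not part of this commit, so its exact schema is an assumption; the list comprehension implies each record carries an 'image' and a 'question' field. A minimal sketch of what the loading step assumes, with sample values borrowed from the previously commented-out examples:

import json

# Assumed shape of vqa_data.json (hypothetical contents, inferred from the comprehension):
# [
#   {"image": "vqav2.png", "question": "What sport is this?"},
#   {"image": "vizwiz.jpeg", "question": "What piece of meat have I taken out of the freezer?"}
# ]
vqa_data = json.load(open("vqa_data.json", "r"))

# Gradio's `examples` is a list of rows, one value per input component,
# in the same order as `inputs` (image first, then question).
examples = [[data["image"], data["question"]] for data in vqa_data]

Since each row's image value is a path string, Gradio resolves it relative to the app's working directory when an example is selected, so the referenced image files are expected to ship alongside app.py.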