ryanramos committed on
Commit
fef7d04
·
1 Parent(s): bce74b5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -1
app.py CHANGED
@@ -24,6 +24,9 @@ answer_list = json.load(open(data_dir + "answer_list.json", "r"))
24
 
25
  examples = [[data['image'], data['question']] for data in vqa_data]
26
 
 
 
 
27
  def infer(image, question):
28
  images = [image]
29
  image_input = [image_transform(image) for image in images]
@@ -52,7 +55,9 @@ demo = gr.Interface(
52
  fn=infer,
53
  inputs=[gr.Image(label='image', type='pil', image_mode='RGB'), gr.Text(label='question')],
54
  outputs=gr.Text(label='answer'),
55
- examples=examples
 
 
56
  )
57
 
58
  demo.launch()
 
24
 
25
  examples = [[data['image'], data['question']] for data in vqa_data]
26
 
27
+ title = 'VQA with ALBEF'
28
+ description = 'VQA with [ALBEF](https://arxiv.org/abs/2107.07651), adapted from the [torchmultimodal example notebook](https://github.com/facebookresearch/multimodal/blob/main/examples/albef/vqa_with_albef.ipynb).'
29
+
30
  def infer(image, question):
31
  images = [image]
32
  image_input = [image_transform(image) for image in images]
 
55
  fn=infer,
56
  inputs=[gr.Image(label='image', type='pil', image_mode='RGB'), gr.Text(label='question')],
57
  outputs=gr.Text(label='answer'),
58
+ examples=examples,
59
+ title=title,
60
+ description=description
61
  )
62
 
63
  demo.launch()