tonymwt committed on
Commit
3fae107
·
1 Parent(s): 32871e6

update leaderboard

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
  scale-hf-logo.png filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,13 +1,13 @@
  ---
- title: VideoEval Pro
+ title: VideoEval-Pro Leaderboard
  emoji: 🥇
  colorFrom: green
  colorTo: indigo
  sdk: gradio
  app_file: app.py
- pinned: true
+ pinned: false
  license: apache-2.0
- short_description: VideoEval-Pro Leaderboard
+ short_description: A more robust benchmark for long video understanding.
  sdk_version: 5.19.0
  ---
 
app.py CHANGED
@@ -1,204 +1,125 @@
1
- import gradio as gr
2
- from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
3
- import pandas as pd
4
- from apscheduler.schedulers.background import BackgroundScheduler
5
- from huggingface_hub import snapshot_download
6
-
7
- from src.about import (
8
- CITATION_BUTTON_LABEL,
9
- CITATION_BUTTON_TEXT,
10
- EVALUATION_QUEUE_TEXT,
11
- INTRODUCTION_TEXT,
12
- LLM_BENCHMARKS_TEXT,
13
- TITLE,
14
- )
15
- from src.display.css_html_js import custom_css
16
- from src.display.utils import (
17
- BENCHMARK_COLS,
18
- COLS,
19
- EVAL_COLS,
20
- EVAL_TYPES,
21
- AutoEvalColumn,
22
- ModelType,
23
- fields,
24
- WeightType,
25
- Precision
26
- )
27
- from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
28
- from src.populate import get_evaluation_queue_df, get_leaderboard_df
29
- from src.submission.submit import add_new_eval
30
-
31
-
32
- def restart_space():
33
- API.restart_space(repo_id=REPO_ID)
34
-
35
- ### Space initialisation
36
- try:
37
- print(EVAL_REQUESTS_PATH)
38
- snapshot_download(
39
- repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
40
- )
41
- except Exception:
42
- restart_space()
43
- try:
44
- print(EVAL_RESULTS_PATH)
45
- snapshot_download(
46
- repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
47
- )
48
- except Exception:
49
- restart_space()
50
-
51
-
52
- LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
53
-
54
- (
55
- finished_eval_queue_df,
56
- running_eval_queue_df,
57
- pending_eval_queue_df,
58
- ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
59
-
60
- def init_leaderboard(dataframe):
61
- if dataframe is None or dataframe.empty:
62
- raise ValueError("Leaderboard DataFrame is empty or None.")
63
- return Leaderboard(
64
- value=dataframe,
65
- datatype=[c.type for c in fields(AutoEvalColumn)],
66
- select_columns=SelectColumns(
67
- default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
68
- cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
69
- label="Select Columns to Display:",
70
- ),
71
- search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
72
- hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
73
- filter_columns=[
74
- ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
75
- ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
76
- ColumnFilter(
77
- AutoEvalColumn.params.name,
78
- type="slider",
79
- min=0.01,
80
- max=150,
81
- label="Select the number of parameters (B)",
82
- ),
83
- ColumnFilter(
84
- AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
85
- ),
86
- ],
87
- bool_checkboxgroup_label="Hide models",
88
- interactive=False,
89
- )
90
-
91
-
92
- demo = gr.Blocks(css=custom_css)
93
- with demo:
94
- gr.HTML(TITLE)
95
- gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
96
-
97
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
98
- with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
99
- leaderboard = init_leaderboard(LEADERBOARD_DF)
100
-
101
- with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
102
- gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
103
-
104
- with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
105
- with gr.Column():
106
- with gr.Row():
107
- gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
108
-
109
- with gr.Column():
110
- with gr.Accordion(
111
- f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
112
- open=False,
113
- ):
114
- with gr.Row():
115
- finished_eval_table = gr.components.Dataframe(
116
- value=finished_eval_queue_df,
117
- headers=EVAL_COLS,
118
- datatype=EVAL_TYPES,
119
- row_count=5,
120
- )
121
- with gr.Accordion(
122
- f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
123
- open=False,
124
- ):
125
- with gr.Row():
126
- running_eval_table = gr.components.Dataframe(
127
- value=running_eval_queue_df,
128
- headers=EVAL_COLS,
129
- datatype=EVAL_TYPES,
130
- row_count=5,
131
- )
132
-
133
- with gr.Accordion(
134
- f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
135
- open=False,
136
- ):
137
- with gr.Row():
138
- pending_eval_table = gr.components.Dataframe(
139
- value=pending_eval_queue_df,
140
- headers=EVAL_COLS,
141
- datatype=EVAL_TYPES,
142
- row_count=5,
143
- )
144
  with gr.Row():
145
- gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
146
 
147
  with gr.Row():
148
- with gr.Column():
149
- model_name_textbox = gr.Textbox(label="Model name")
150
- revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
151
- model_type = gr.Dropdown(
152
- choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
153
- label="Model type",
154
- multiselect=False,
155
- value=None,
156
- interactive=True,
157
- )
158
 
159
- with gr.Column():
160
- precision = gr.Dropdown(
161
- choices=[i.value.name for i in Precision if i != Precision.Unknown],
162
- label="Precision",
163
- multiselect=False,
164
- value="float16",
165
- interactive=True,
166
- )
167
- weight_type = gr.Dropdown(
168
- choices=[i.value.name for i in WeightType],
169
- label="Weights type",
170
- multiselect=False,
171
- value="Original",
172
- interactive=True,
173
- )
174
- base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
175
 
176
- submit_button = gr.Button("Submit Eval")
177
- submission_result = gr.Markdown()
178
- submit_button.click(
179
- add_new_eval,
180
- [
181
- model_name_textbox,
182
- base_model_name_textbox,
183
- revision_name_textbox,
184
- precision,
185
- weight_type,
186
- model_type,
187
- ],
188
- submission_result,
 
 
 
189
  )
190
-
191
- with gr.Row():
192
- with gr.Accordion("📙 Citation", open=False):
193
- citation_button = gr.Textbox(
194
- value=CITATION_BUTTON_TEXT,
195
- label=CITATION_BUTTON_LABEL,
196
- lines=20,
197
- elem_id="citation-button",
198
- show_copy_button=True,
 
199
  )
200
 
201
- scheduler = BackgroundScheduler()
202
- scheduler.add_job(restart_space, "interval", seconds=1800)
203
- scheduler.start()
204
- demo.queue(default_concurrency_limit=40).launch()
 
1
+ from utils import *
2
+
3
+ global data_component
4
+
5
+ def update_table(query, min_size, max_size, selected_tasks=None, selected_type="All"):
6
+ df = get_df()
7
+ if selected_type and selected_type != "All":
8
+ df = df[df["Type"] == selected_type]
9
+ filtered_df = search_and_filter_models(df, query, min_size, max_size)
10
+ if selected_tasks and len(selected_tasks) > 0:
11
+ selected_columns = BASE_COLS + selected_tasks
12
+ filtered_df = filtered_df[selected_columns]
13
+ return filtered_df
14
+
15
+ with gr.Blocks() as block:
16
+ gr.Markdown(LEADERBOARD_INTRODUCTION)
17
+
18
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
19
+ # Tab 1: Leaderboard
20
+ with gr.TabItem("📊 VideoEval-Pro", elem_id="qa-tab-table1", id=1):
21
  with gr.Row():
22
+ with gr.Accordion("Citation", open=False):
23
+ citation_button = gr.Textbox(
24
+ value=CITATION_BUTTON_TEXT,
25
+ label=CITATION_BUTTON_LABEL,
26
+ elem_id="citation-button",
27
+ lines=10,
28
+ )
29
 
30
  with gr.Row():
31
+ search_bar = gr.Textbox(
32
+ placeholder="Search models...",
33
+ show_label=False,
34
+ elem_id="search-bar"
35
+ )
36
+
37
+ df = get_df()
38
+ min_size, max_size = get_size_range(df)
 
 
39
 
40
+ with gr.Row():
41
+ min_size_slider = gr.Slider(
42
+ minimum=min_size,
43
+ maximum=max_size,
44
+ value=min_size,
45
+ step=0.1,
46
+ label="Minimum number of parameters (B)",
47
+ )
48
+ max_size_slider = gr.Slider(
49
+ minimum=min_size,
50
+ maximum=max_size,
51
+ value=max_size,
52
+ step=0.1,
53
+ label="Maximum number of parameters (B)",
54
+ )
55
+
56
+ with gr.Row():
57
+ type_select = gr.Dropdown(
58
+ choices=["All", "Proprietary", "Open-source"],
59
+ value="All",
60
+ label="Select Model Type",
61
+ elem_id="type-select"
62
+ )
63
 
64
+ with gr.Row():
65
+ tasks_select = gr.CheckboxGroup(
66
+ choices=TASKS,
67
+ value=OPEN_TASKS,
68
+ label="Select tasks to Display",
69
+ elem_id="tasks-select"
70
+ )
71
+
72
+ data_component = gr.components.Dataframe(
73
+ value=df[DEFAULT_NAMES],
74
+ headers=DEFAULT_NAMES,
75
+ type="pandas",
76
+ datatype=DATA_TITLE_TYPE,
77
+ interactive=False,
78
+ visible=True,
79
+ max_height=2400,
80
  )
81
+
82
+ refresh_button = gr.Button("Refresh")
83
+
84
+ def update_with_tasks(*args):
85
+ return update_table(*args)
86
+
87
+ search_bar.change(
88
+ fn=update_with_tasks,
89
+ inputs=[search_bar, min_size_slider, max_size_slider, tasks_select, type_select],
90
+ outputs=data_component
91
  )
92
+ min_size_slider.change(
93
+ fn=update_with_tasks,
94
+ inputs=[search_bar, min_size_slider, max_size_slider, tasks_select, type_select],
95
+ outputs=data_component
96
+ )
97
+ max_size_slider.change(
98
+ fn=update_with_tasks,
99
+ inputs=[search_bar, min_size_slider, max_size_slider, tasks_select, type_select],
100
+ outputs=data_component
101
+ )
102
+ tasks_select.change(
103
+ fn=update_with_tasks,
104
+ inputs=[search_bar, min_size_slider, max_size_slider, tasks_select, type_select],
105
+ outputs=data_component
106
+ )
107
+ type_select.change(
108
+ fn=update_with_tasks,
109
+ inputs=[search_bar, min_size_slider, max_size_slider, tasks_select, type_select],
110
+ outputs=data_component
111
+ )
112
+ refresh_button.click(fn=refresh_data, outputs=data_component)
113
+ gr.Markdown(TABLE_INTRODUCTION)
114
+
115
+ # Tab 2: About
116
+ with gr.TabItem("📝 About", elem_id="qa-tab-table2", id=2):
117
+ gr.Markdown(LEADERBOARD_INFO, elem_classes="markdown-text")
118
+ gr.Image("dataset_statistics.png", width=900, label="Dataset Statistics")
119
+
120
+ # Tab 3: Submit
121
+ with gr.TabItem("🚀 Submit here! ", elem_id="submit-tab", id=3):
122
+ with gr.Row():
123
+ gr.Markdown(SUBMIT_INTRODUCTION, elem_classes="markdown-text")
124
 
125
+ block.launch()
dataset_statistics.png ADDED

Git LFS Details

  • SHA256: 1233b724ce6737e3bf3a0add383be881b3c22d7690a00d10ffbde32791ea854f
  • Pointer size: 131 Bytes
  • Size of remote file: 120 kB
results.jsonl ADDED
@@ -0,0 +1,21 @@
1
+ {"Models": "GPT-4o", "Model Size(B)": "-", "Frames": 256, "Type": "Proprietary", "URL": "https://openai.com/index/hello-gpt-4o/", "LP_Open": 39.4, "LP_MCQ": 64.8, "LR_Open": 23.1, "LR_MCQ": 62.6, "HP_Open": 26.4, "HP_MCQ": 42.1, "HR_Open": 29.2, "HR_MCQ": 50.4, "Overall_Open": 34.2, "Overall_MCQ": 59.5}
2
+ {"Models": "Gemini-1.5-Flash", "Model Size(B)": "-", "Frames": 512, "Type": "Proprietary", "URL": "https://storage.googleapis.com/deepmind-media/gemini/gemini_v1_5_report.pdf", "LP_Open": 41.5, "LP_MCQ": 65.5, "LR_Open": 25.9, "LR_MCQ": 63.9, "HP_Open": 27.3, "HP_MCQ": 36.4, "HR_Open": 25.8, "HR_MCQ": 55.7, "Overall_Open": 35.1, "Overall_MCQ": 60.6}
3
+ {"Models": "Gemini-2.5-Flash", "Model Size(B)": "-", "Frames": 256, "Type": "Proprietary", "URL": "https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/", "LP_Open": 42.4, "LP_MCQ": 64.1, "LR_Open": 30.6, "LR_MCQ": 65.3, "HP_Open": 25.6, "HP_MCQ": 33.9, "HR_Open": 26.9, "HR_MCQ": 54.2, "Overall_Open": 36.3, "Overall_MCQ": 59.3}
4
+ {"Models": "Gemini-1.5-Pro", "Model Size(B)": "-", "Frames": 512, "Type": "Proprietary", "URL": "https://storage.googleapis.com/deepmind-media/gemini/gemini_v1_5_report.pdf", "LP_Open": 43.7, "LP_MCQ": 66.7, "LR_Open": 32.7, "LR_MCQ": 69.4, "HP_Open": 35.5, "HP_MCQ": 40.5, "HR_Open": 31.8, "HR_MCQ": 61.0, "Overall_Open": 39.3, "Overall_MCQ": 63.4}
5
+ {"Models": "GPT-4.1-mini", "Model Size(B)": "-", "Frames": 256, "Type": "Proprietary", "URL": "https://openai.com/index/gpt-4-1/", "LP_Open": 46.0, "LP_MCQ": 68.6, "LR_Open": 32.0, "LR_MCQ": 68.7, "HP_Open": 27.3, "HP_MCQ": 38.8, "HR_Open": 32.6, "HR_MCQ": 57.6, "Overall_Open": 39.9, "Overall_MCQ": 63.5}
6
+ {"Models": "GPT-4.1", "Model Size(B)": "-", "Frames": 256, "Type": "Proprietary", "URL": "https://openai.com/index/gpt-4-1/", "LP_Open": 47.2, "LP_MCQ": 68.8, "LR_Open": 29.9, "LR_MCQ": 68.7, "HP_Open": 28.1, "HP_MCQ": 38.0, "HR_Open": 34.5, "HR_MCQ": 59.5, "Overall_Open": 40.8, "Overall_MCQ": 64.0}
7
+ {"Models": "Video-LLaVA", "Model Size(B)": "8", "Frames": 8, "Type": "Open-source", "URL": "https://github.com/PKU-YuanGroup/Video-LLaVA", "LP_Open": 13.2, "LP_MCQ": 27.5, "LR_Open": 6.1, "LR_MCQ": 33.3, "HP_Open": 14.0, "HP_MCQ": 24.8, "HR_Open": 6.1, "HR_MCQ": 26.5, "Overall_Open": 11.0, "Overall_MCQ": 27.7}
8
+ {"Models": "Mantis-Idefics2", "Model Size(B)": "8", "Frames": 24, "Type": "Open-source", "URL": "https://arxiv.org/abs/2405.01483", "LP_Open": 17.8, "LP_MCQ": 33.2, "LR_Open": 9.5, "LR_MCQ": 29.9, "HP_Open": 16.5, "HP_MCQ": 16.5, "HR_Open": 8.3, "HR_MCQ": 29.9, "Overall_Open": 14.8, "Overall_MCQ": 30.6}
9
+ {"Models": "LongVA", "Model Size(B)": "7", "Frames": 64, "Type": "Open-source", "URL": "https://arxiv.org/abs/2406.16852", "LP_Open": 20.5, "LP_MCQ": 43.3, "LR_Open": 6.8, "LR_MCQ": 33.3, "HP_Open": 19.0, "HP_MCQ": 24.0, "HR_Open": 9.5, "HR_MCQ": 31.8, "Overall_Open": 16.5, "Overall_MCQ": 38.0}
10
+ {"Models": "Phi-4-Mini", "Model Size(B)": "5.6", "Frames": 128, "Type": "Open-source", "URL": "https://arxiv.org/abs/2503.01743", "LP_Open": 19.2, "LP_MCQ": 46.4, "LR_Open": 12.9, "LR_MCQ": 47.6, "HP_Open": 18.2, "HP_MCQ": 30.6, "HR_Open": 10.2, "HR_MCQ": 31.4, "Overall_Open": 16.5, "Overall_MCQ": 42.0}
11
+ {"Models": "LongLLaVA", "Model Size(B)": "9", "Frames": 512, "Type": "Open-source", "URL": "https://huggingface.co/aws-prototyping/long-llava-qwen2-7b", "LP_Open": 21.7, "LP_MCQ": 41.2, "LR_Open": 15.0, "LR_MCQ": 34.0, "HP_Open": 14.0, "HP_MCQ": 29.8, "HR_Open": 10.2, "HR_MCQ": 29.2, "Overall_Open": 17.8, "Overall_MCQ": 36.9}
12
+ {"Models": "Video-XL", "Model Size(B)": "7", "Frames": 512, "Type": "Open-source", "URL": "https://github.com/VectorSpaceLab/Video-XL", "LP_Open": 22.3, "LP_MCQ": 41.9, "LR_Open": 15.0, "LR_MCQ": 34.0, "HP_Open": 18.2, "HP_MCQ": 28.1, "HR_Open": 10.2, "HR_MCQ": 29.2, "Overall_Open": 18.6, "Overall_MCQ": 38.2}
13
+ {"Models": "LongVU", "Model Size(B)": "7", "Frames": 512, "Type": "Open-source", "URL": "https://arxiv.org/abs/2410.17434", "LP_Open": 25.9, "LP_MCQ": 45.6, "LR_Open": 12.9, "LR_MCQ": 38.8, "HP_Open": 19.8, "HP_MCQ": 24.0, "HR_Open": 17.4, "HR_MCQ": 37.1, "Overall_Open": 22.1, "Overall_MCQ": 41.0}
14
+ {"Models": "Vamba", "Model Size(B)": "10", "Frames": 512, "Type": "Open-source", "URL": "https://arxiv.org/abs/2503.11579", "LP_Open": 28.1, "LP_MCQ": 52.4, "LR_Open": 10.9, "LR_MCQ": 40.8, "HP_Open": 21.5, "HP_MCQ": 26.4, "HR_Open": 12.5, "HR_MCQ": 37.9, "Overall_Open": 22.3, "Overall_MCQ": 45.7}
15
+ {"Models": "LLaVA-Video", "Model Size(B)": "7", "Frames": 64, "Type": "Open-source", "URL": "https://huggingface.co/lmms-lab/LLaVA-NeXT-Video-72B-Qwen2", "LP_Open": 28.5, "LP_MCQ": 53.5, "LR_Open": 13.6, "LR_MCQ": 47.6, "HP_Open": 20.7, "HP_MCQ": 28.9, "HR_Open": 19.3, "HR_MCQ": 40.2, "Overall_Open": 24.2, "Overall_MCQ": 47.8}
16
+ {"Models": "InternVL2.5", "Model Size(B)": "8", "Frames": 64, "Type": "Open-source", "URL": "https://internvl.github.io/blog/2024-12-05-InternVL-2.5/", "LP_Open": 28.8, "LP_MCQ": 54.3, "LR_Open": 19.7, "LR_MCQ": 46.3, "HP_Open": 21.5, "HP_MCQ": 35.5, "HR_Open": 16.7, "HR_MCQ": 39.0, "Overall_Open": 24.6, "Overall_MCQ": 48.5}
17
+ {"Models": "InternVL3", "Model Size(B)": "8", "Frames": 64, "Type": "Open-source", "URL": "https://arxiv.org/abs/2504.10479", "LP_Open": 30.3, "LP_MCQ": 54.6, "LR_Open": 17.0, "LR_MCQ": 49.0, "HP_Open": 24.0, "HP_MCQ": 34.7, "HR_Open": 13.3, "HR_MCQ": 36.7, "Overall_Open": 24.7, "Overall_MCQ": 48.4}
18
+ {"Models": "Qwen2-VL", "Model Size(B)": "7", "Frames": 512, "Type": "Open-source", "URL": "https://github.com/QwenLM/Qwen2-VL", "LP_Open": 31.7, "LP_MCQ": 59.3, "LR_Open": 14.3, "LR_MCQ": 51.7, "HP_Open": 21.5, "HP_MCQ": 28.1, "HR_Open": 20.5, "HR_MCQ": 39.0, "Overall_Open": 26.5, "Overall_MCQ": 48.2}
19
+ {"Models": "InternVideo2.5", "Model Size(B)": "8", "Frames": 512, "Type": "Open-source", "URL": "https://arxiv.org/abs/2501.12386", "LP_Open": 33.6, "LP_MCQ": 59.8, "LR_Open": 17.0, "LR_MCQ": 47.6, "HP_Open": 19.8, "HP_MCQ": 34.7, "HR_Open": 18.2, "HR_MCQ": 45.8, "Overall_Open": 27.2, "Overall_MCQ": 53.2}
20
+ {"Models": "VideoChat-Flash", "Model Size(B)": "7", "Frames": 512, "Type": "Open-source", "URL": "https://github.com/OpenGVLab/VideoChat-Flash", "LP_Open": 33.3, "LP_MCQ": 57.7, "LR_Open": 16.3, "LR_MCQ": 43.5, "HP_Open": 21.5, "HP_MCQ": 33.9, "HR_Open": 17.4, "HR_MCQ": 44.7, "Overall_Open": 27.0, "Overall_MCQ": 51.2}
21
+ {"Models": "Qwen2.5-VL", "Model Size(B)": "7", "Frames": 512, "Type": "Open-source", "URL": "https://arxiv.org/abs/2502.13923", "LP_Open": 33.9, "LP_MCQ": 51.7, "LR_Open": 15.6, "LR_MCQ": 48.3, "HP_Open": 24.8, "HP_MCQ": 31.4, "HR_Open": 17.8, "HR_MCQ": 39.8, "Overall_Open": 27.7, "Overall_MCQ": 46.9}
src/about.py DELETED
@@ -1,72 +0,0 @@
1
- from dataclasses import dataclass
2
- from enum import Enum
3
-
4
- @dataclass
5
- class Task:
6
- benchmark: str
7
- metric: str
8
- col_name: str
9
-
10
-
11
- # Select your tasks here
12
- # ---------------------------------------------------
13
- class Tasks(Enum):
14
- # task_key in the json file, metric_key in the json file, name to display in the leaderboard
15
- task0 = Task("anli_r1", "acc", "ANLI")
16
- task1 = Task("logiqa", "acc_norm", "LogiQA")
17
-
18
- NUM_FEWSHOT = 0 # Change with your few shot
19
- # ---------------------------------------------------
20
-
21
-
22
-
23
- # Your leaderboard name
24
- TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
25
-
26
- # What does your leaderboard evaluate?
27
- INTRODUCTION_TEXT = """
28
- Intro text
29
- """
30
-
31
- # Which evaluations are you running? how can people reproduce what you have?
32
- LLM_BENCHMARKS_TEXT = f"""
33
- ## How it works
34
-
35
- ## Reproducibility
36
- To reproduce our results, here is the commands you can run:
37
-
38
- """
39
-
40
- EVALUATION_QUEUE_TEXT = """
41
- ## Some good practices before submitting a model
42
-
43
- ### 1) Make sure you can load your model and tokenizer using AutoClasses:
44
- ```python
45
- from transformers import AutoConfig, AutoModel, AutoTokenizer
46
- config = AutoConfig.from_pretrained("your model name", revision=revision)
47
- model = AutoModel.from_pretrained("your model name", revision=revision)
48
- tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
49
- ```
50
- If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
51
-
52
- Note: make sure your model is public!
53
- Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
54
-
55
- ### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
56
- It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
57
-
58
- ### 3) Make sure your model has an open license!
59
- This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
60
-
61
- ### 4) Fill up your model card
62
- When we add extra information about models to the leaderboard, it will be automatically taken from the model card
63
-
64
- ## In case of model failure
65
- If your model is displayed in the `FAILED` category, its execution stopped.
66
- Make sure you have followed the above steps first.
67
- If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
68
- """
69
-
70
- CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
71
- CITATION_BUTTON_TEXT = r"""
72
- """
src/display/css_html_js.py DELETED
@@ -1,105 +0,0 @@
1
- custom_css = """
2
-
3
- .markdown-text {
4
- font-size: 16px !important;
5
- }
6
-
7
- #models-to-add-text {
8
- font-size: 18px !important;
9
- }
10
-
11
- #citation-button span {
12
- font-size: 16px !important;
13
- }
14
-
15
- #citation-button textarea {
16
- font-size: 16px !important;
17
- }
18
-
19
- #citation-button > label > button {
20
- margin: 6px;
21
- transform: scale(1.3);
22
- }
23
-
24
- #leaderboard-table {
25
- margin-top: 15px
26
- }
27
-
28
- #leaderboard-table-lite {
29
- margin-top: 15px
30
- }
31
-
32
- #search-bar-table-box > div:first-child {
33
- background: none;
34
- border: none;
35
- }
36
-
37
- #search-bar {
38
- padding: 0px;
39
- }
40
-
41
- /* Limit the width of the first AutoEvalColumn so that names don't expand too much */
42
- #leaderboard-table td:nth-child(2),
43
- #leaderboard-table th:nth-child(2) {
44
- max-width: 400px;
45
- overflow: auto;
46
- white-space: nowrap;
47
- }
48
-
49
- .tab-buttons button {
50
- font-size: 20px;
51
- }
52
-
53
- #scale-logo {
54
- border-style: none !important;
55
- box-shadow: none;
56
- display: block;
57
- margin-left: auto;
58
- margin-right: auto;
59
- max-width: 600px;
60
- }
61
-
62
- #scale-logo .download {
63
- display: none;
64
- }
65
- #filter_type{
66
- border: 0;
67
- padding-left: 0;
68
- padding-top: 0;
69
- }
70
- #filter_type label {
71
- display: flex;
72
- }
73
- #filter_type label > span{
74
- margin-top: var(--spacing-lg);
75
- margin-right: 0.5em;
76
- }
77
- #filter_type label > .wrap{
78
- width: 103px;
79
- }
80
- #filter_type label > .wrap .wrap-inner{
81
- padding: 2px;
82
- }
83
- #filter_type label > .wrap .wrap-inner input{
84
- width: 1px
85
- }
86
- #filter-columns-type{
87
- border:0;
88
- padding:0.5;
89
- }
90
- #filter-columns-size{
91
- border:0;
92
- padding:0.5;
93
- }
94
- #box-filter > .form{
95
- border: 0
96
- }
97
- """
98
-
99
- get_window_url_params = """
100
- function(url_params) {
101
- const params = new URLSearchParams(window.location.search);
102
- url_params = Object.fromEntries(params);
103
- return url_params;
104
- }
105
- """
src/display/formatting.py DELETED
@@ -1,27 +0,0 @@
1
- def model_hyperlink(link, model_name):
2
- return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
3
-
4
-
5
- def make_clickable_model(model_name):
6
- link = f"https://huggingface.co/{model_name}"
7
- return model_hyperlink(link, model_name)
8
-
9
-
10
- def styled_error(error):
11
- return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
12
-
13
-
14
- def styled_warning(warn):
15
- return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
16
-
17
-
18
- def styled_message(message):
19
- return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
20
-
21
-
22
- def has_no_nan_values(df, columns):
23
- return df[columns].notna().all(axis=1)
24
-
25
-
26
- def has_nan_values(df, columns):
27
- return df[columns].isna().any(axis=1)
src/display/utils.py DELETED
@@ -1,110 +0,0 @@
1
- from dataclasses import dataclass, make_dataclass
2
- from enum import Enum
3
-
4
- import pandas as pd
5
-
6
- from src.about import Tasks
7
-
8
- def fields(raw_class):
9
- return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
10
-
11
-
12
- # These classes are for user facing column names,
13
- # to avoid having to change them all around the code
14
- # when a modif is needed
15
- @dataclass
16
- class ColumnContent:
17
- name: str
18
- type: str
19
- displayed_by_default: bool
20
- hidden: bool = False
21
- never_hidden: bool = False
22
-
23
- ## Leaderboard columns
24
- auto_eval_column_dict = []
25
- # Init
26
- auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
27
- auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
28
- #Scores
29
- auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
30
- for task in Tasks:
31
- auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
32
- # Model information
33
- auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
34
- auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
35
- auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
36
- auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
37
- auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
38
- auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
39
- auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
40
- auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
41
- auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
42
-
43
- # We use make dataclass to dynamically fill the scores from Tasks
44
- AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
45
-
46
- ## For the queue columns in the submission tab
47
- @dataclass(frozen=True)
48
- class EvalQueueColumn: # Queue column
49
- model = ColumnContent("model", "markdown", True)
50
- revision = ColumnContent("revision", "str", True)
51
- private = ColumnContent("private", "bool", True)
52
- precision = ColumnContent("precision", "str", True)
53
- weight_type = ColumnContent("weight_type", "str", "Original")
54
- status = ColumnContent("status", "str", True)
55
-
56
- ## All the model information that we might need
57
- @dataclass
58
- class ModelDetails:
59
- name: str
60
- display_name: str = ""
61
- symbol: str = "" # emoji
62
-
63
-
64
- class ModelType(Enum):
65
- PT = ModelDetails(name="pretrained", symbol="🟢")
66
- FT = ModelDetails(name="fine-tuned", symbol="🔶")
67
- IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
68
- RL = ModelDetails(name="RL-tuned", symbol="🟦")
69
- Unknown = ModelDetails(name="", symbol="?")
70
-
71
- def to_str(self, separator=" "):
72
- return f"{self.value.symbol}{separator}{self.value.name}"
73
-
74
- @staticmethod
75
- def from_str(type):
76
- if "fine-tuned" in type or "🔶" in type:
77
- return ModelType.FT
78
- if "pretrained" in type or "🟢" in type:
79
- return ModelType.PT
80
- if "RL-tuned" in type or "🟦" in type:
81
- return ModelType.RL
82
- if "instruction-tuned" in type or "⭕" in type:
83
- return ModelType.IFT
84
- return ModelType.Unknown
85
-
86
- class WeightType(Enum):
87
- Adapter = ModelDetails("Adapter")
88
- Original = ModelDetails("Original")
89
- Delta = ModelDetails("Delta")
90
-
91
- class Precision(Enum):
92
- float16 = ModelDetails("float16")
93
- bfloat16 = ModelDetails("bfloat16")
94
- Unknown = ModelDetails("?")
95
-
96
- def from_str(precision):
97
- if precision in ["torch.float16", "float16"]:
98
- return Precision.float16
99
- if precision in ["torch.bfloat16", "bfloat16"]:
100
- return Precision.bfloat16
101
- return Precision.Unknown
102
-
103
- # Column selection
104
- COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
105
-
106
- EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
107
- EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
108
-
109
- BENCHMARK_COLS = [t.value.col_name for t in Tasks]
110
-
src/envs.py DELETED
@@ -1,25 +0,0 @@
1
- import os
2
-
3
- from huggingface_hub import HfApi
4
-
5
- # Info to change for your repository
6
- # ----------------------------------
7
- TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org
8
-
9
- OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request dataset, with the correct format!
10
- # ----------------------------------
11
-
12
- REPO_ID = f"{OWNER}/leaderboard"
13
- QUEUE_REPO = f"{OWNER}/requests"
14
- RESULTS_REPO = f"{OWNER}/results"
15
-
16
- # If you setup a cache later, just change HF_HOME
17
- CACHE_PATH=os.getenv("HF_HOME", ".")
18
-
19
- # Local caches
20
- EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
21
- EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
22
- EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
23
- EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
24
-
25
- API = HfApi(token=TOKEN)
 
src/leaderboard/read_evals.py DELETED
@@ -1,196 +0,0 @@
1
- import glob
2
- import json
3
- import math
4
- import os
5
- from dataclasses import dataclass
6
-
7
- import dateutil
8
- import numpy as np
9
-
10
- from src.display.formatting import make_clickable_model
11
- from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
12
- from src.submission.check_validity import is_model_on_hub
13
-
14
-
15
- @dataclass
16
- class EvalResult:
17
- """Represents one full evaluation. Built from a combination of the result and request file for a given run.
18
- """
19
- eval_name: str # org_model_precision (uid)
20
- full_model: str # org/model (path on hub)
21
- org: str
22
- model: str
23
- revision: str # commit hash, "" if main
24
- results: dict
25
- precision: Precision = Precision.Unknown
26
- model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
27
- weight_type: WeightType = WeightType.Original # Original or Adapter
28
- architecture: str = "Unknown"
29
- license: str = "?"
30
- likes: int = 0
31
- num_params: int = 0
32
- date: str = "" # submission date of request file
33
- still_on_hub: bool = False
34
-
35
- @classmethod
36
- def init_from_json_file(self, json_filepath):
37
- """Inits the result from the specific model result file"""
38
- with open(json_filepath) as fp:
39
- data = json.load(fp)
40
-
41
- config = data.get("config")
42
-
43
- # Precision
44
- precision = Precision.from_str(config.get("model_dtype"))
45
-
46
- # Get model and org
47
- org_and_model = config.get("model_name", config.get("model_args", None))
48
- org_and_model = org_and_model.split("/", 1)
49
-
50
- if len(org_and_model) == 1:
51
- org = None
52
- model = org_and_model[0]
53
- result_key = f"{model}_{precision.value.name}"
54
- else:
55
- org = org_and_model[0]
56
- model = org_and_model[1]
57
- result_key = f"{org}_{model}_{precision.value.name}"
58
- full_model = "/".join(org_and_model)
59
-
60
- still_on_hub, _, model_config = is_model_on_hub(
61
- full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
62
- )
63
- architecture = "?"
64
- if model_config is not None:
65
- architectures = getattr(model_config, "architectures", None)
66
- if architectures:
67
- architecture = ";".join(architectures)
68
-
69
- # Extract results available in this file (some results are split in several files)
70
- results = {}
71
- for task in Tasks:
72
- task = task.value
73
-
74
- # We average all scores of a given metric (not all metrics are present in all files)
75
- accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
76
- if accs.size == 0 or any([acc is None for acc in accs]):
77
- continue
78
-
79
- mean_acc = np.mean(accs) * 100.0
80
- results[task.benchmark] = mean_acc
81
-
82
- return self(
83
- eval_name=result_key,
84
- full_model=full_model,
85
- org=org,
86
- model=model,
87
- results=results,
88
- precision=precision,
89
- revision= config.get("model_sha", ""),
90
- still_on_hub=still_on_hub,
91
- architecture=architecture
92
- )
93
-
94
- def update_with_request_file(self, requests_path):
95
- """Finds the relevant request file for the current model and updates info with it"""
96
- request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
97
-
98
- try:
99
- with open(request_file, "r") as f:
100
- request = json.load(f)
101
- self.model_type = ModelType.from_str(request.get("model_type", ""))
102
- self.weight_type = WeightType[request.get("weight_type", "Original")]
103
- self.license = request.get("license", "?")
104
- self.likes = request.get("likes", 0)
105
- self.num_params = request.get("params", 0)
106
- self.date = request.get("submitted_time", "")
107
- except Exception:
108
- print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
109
-
110
- def to_dict(self):
111
- """Converts the Eval Result to a dict compatible with our dataframe display"""
112
- average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
113
- data_dict = {
114
- "eval_name": self.eval_name, # not a column, just a save name,
115
- AutoEvalColumn.precision.name: self.precision.value.name,
116
- AutoEvalColumn.model_type.name: self.model_type.value.name,
117
- AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
118
- AutoEvalColumn.weight_type.name: self.weight_type.value.name,
119
- AutoEvalColumn.architecture.name: self.architecture,
120
- AutoEvalColumn.model.name: make_clickable_model(self.full_model),
121
- AutoEvalColumn.revision.name: self.revision,
122
- AutoEvalColumn.average.name: average,
123
- AutoEvalColumn.license.name: self.license,
124
- AutoEvalColumn.likes.name: self.likes,
125
- AutoEvalColumn.params.name: self.num_params,
126
- AutoEvalColumn.still_on_hub.name: self.still_on_hub,
127
- }
128
-
129
- for task in Tasks:
130
- data_dict[task.value.col_name] = self.results[task.value.benchmark]
131
-
132
- return data_dict
133
-
134
-
135
- def get_request_file_for_model(requests_path, model_name, precision):
136
- """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
137
- request_files = os.path.join(
138
- requests_path,
139
- f"{model_name}_eval_request_*.json",
140
- )
141
- request_files = glob.glob(request_files)
142
-
143
- # Select correct request file (precision)
144
- request_file = ""
145
- request_files = sorted(request_files, reverse=True)
146
- for tmp_request_file in request_files:
147
- with open(tmp_request_file, "r") as f:
148
- req_content = json.load(f)
149
- if (
150
- req_content["status"] in ["FINISHED"]
151
- and req_content["precision"] == precision.split(".")[-1]
152
- ):
153
- request_file = tmp_request_file
154
- return request_file
155
-
156
-
157
- def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
158
- """From the path of the results folder root, extract all needed info for results"""
159
- model_result_filepaths = []
160
-
161
- for root, _, files in os.walk(results_path):
162
- # We should only have json files in model results
163
- if len(files) == 0 or any([not f.endswith(".json") for f in files]):
164
- continue
165
-
166
- # Sort the files by date
167
- try:
168
- files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
169
- except dateutil.parser._parser.ParserError:
170
- files = [files[-1]]
171
-
172
- for file in files:
173
- model_result_filepaths.append(os.path.join(root, file))
174
-
175
- eval_results = {}
176
- for model_result_filepath in model_result_filepaths:
177
- # Creation of result
178
- eval_result = EvalResult.init_from_json_file(model_result_filepath)
179
- eval_result.update_with_request_file(requests_path)
180
-
181
- # Store results of same eval together
182
- eval_name = eval_result.eval_name
183
- if eval_name in eval_results.keys():
184
- eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
185
- else:
186
- eval_results[eval_name] = eval_result
187
-
188
- results = []
189
- for v in eval_results.values():
190
- try:
191
- v.to_dict() # we test if the dict version is complete
192
- results.append(v)
193
- except KeyError: # not all eval values present
194
- continue
195
-
196
- return results
 
src/populate.py DELETED
@@ -1,58 +0,0 @@
1
- import json
2
- import os
3
-
4
- import pandas as pd
5
-
6
- from src.display.formatting import has_no_nan_values, make_clickable_model
7
- from src.display.utils import AutoEvalColumn, EvalQueueColumn
8
- from src.leaderboard.read_evals import get_raw_eval_results
9
-
10
-
11
- def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
12
- """Creates a dataframe from all the individual experiment results"""
13
- raw_data = get_raw_eval_results(results_path, requests_path)
14
- all_data_json = [v.to_dict() for v in raw_data]
15
-
16
- df = pd.DataFrame.from_records(all_data_json)
17
- df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
18
- df = df[cols].round(decimals=2)
19
-
20
- # filter out if any of the benchmarks have not been produced
21
- df = df[has_no_nan_values(df, benchmark_cols)]
22
- return df
23
-
24
-
25
- def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
26
- """Creates the different dataframes for the evaluation queues requestes"""
27
- entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
28
- all_evals = []
29
-
30
- for entry in entries:
31
- if ".json" in entry:
32
- file_path = os.path.join(save_path, entry)
33
- with open(file_path) as fp:
34
- data = json.load(fp)
35
-
36
- data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
37
- data[EvalQueueColumn.revision.name] = data.get("revision", "main")
38
-
39
- all_evals.append(data)
40
- elif ".md" not in entry:
41
- # this is a folder
42
- sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if os.path.isfile(e) and not e.startswith(".")]
43
- for sub_entry in sub_entries:
44
- file_path = os.path.join(save_path, entry, sub_entry)
45
- with open(file_path) as fp:
46
- data = json.load(fp)
47
-
48
- data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
49
- data[EvalQueueColumn.revision.name] = data.get("revision", "main")
50
- all_evals.append(data)
51
-
52
- pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
53
- running_list = [e for e in all_evals if e["status"] == "RUNNING"]
54
- finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
55
- df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
56
- df_running = pd.DataFrame.from_records(running_list, columns=cols)
57
- df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
58
- return df_finished[cols], df_running[cols], df_pending[cols]
 
src/submission/check_validity.py DELETED
@@ -1,99 +0,0 @@
1
- import json
2
- import os
3
- import re
4
- from collections import defaultdict
5
- from datetime import datetime, timedelta, timezone
6
-
7
- import huggingface_hub
8
- from huggingface_hub import ModelCard
9
- from huggingface_hub.hf_api import ModelInfo
10
- from transformers import AutoConfig
11
- from transformers.models.auto.tokenization_auto import AutoTokenizer
12
-
13
- def check_model_card(repo_id: str) -> tuple[bool, str]:
14
- """Checks if the model card and license exist and have been filled"""
15
- try:
16
- card = ModelCard.load(repo_id)
17
- except huggingface_hub.utils.EntryNotFoundError:
18
- return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
19
-
20
- # Enforce license metadata
21
- if card.data.license is None:
22
- if not ("license_name" in card.data and "license_link" in card.data):
23
- return False, (
24
- "License not found. Please add a license to your model card using the `license` metadata or a"
25
- " `license_name`/`license_link` pair."
26
- )
27
-
28
- # Enforce card content
29
- if len(card.text) < 200:
30
- return False, "Please add a description to your model card, it is too short."
31
-
32
- return True, ""
33
-
34
- def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
35
- """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
36
- try:
37
- config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
38
- if test_tokenizer:
39
- try:
40
- tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
41
- except ValueError as e:
42
- return (
43
- False,
44
- f"uses a tokenizer which is not in a transformers release: {e}",
45
- None
46
- )
47
- except Exception as e:
48
- return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
49
- return True, None, config
50
-
51
- except ValueError:
52
- return (
53
- False,
54
- "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
55
- None
56
- )
57
-
58
- except Exception as e:
59
- return False, "was not found on hub!", None
60
-
61
-
62
- def get_model_size(model_info: ModelInfo, precision: str):
63
- """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
64
- try:
65
- model_size = round(model_info.safetensors["total"] / 1e9, 3)
66
- except (AttributeError, TypeError):
67
- return 0 # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
68
-
69
- size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
70
- model_size = size_factor * model_size
71
- return model_size
72
-
73
- def get_model_arch(model_info: ModelInfo):
74
- """Gets the model architecture from the configuration"""
75
- return model_info.config.get("architectures", "Unknown")
76
-
77
- def already_submitted_models(requested_models_dir: str) -> set[str]:
78
- """Gather a list of already submitted models to avoid duplicates"""
79
- depth = 1
80
- file_names = []
81
- users_to_submission_dates = defaultdict(list)
82
-
83
- for root, _, files in os.walk(requested_models_dir):
84
- current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
85
- if current_depth == depth:
86
- for file in files:
87
- if not file.endswith(".json"):
88
- continue
89
- with open(os.path.join(root, file), "r") as f:
90
- info = json.load(f)
91
- file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
92
-
93
- # Select organisation
94
- if info["model"].count("/") == 0 or "submitted_time" not in info:
95
- continue
96
- organisation, _ = info["model"].split("/")
97
- users_to_submission_dates[organisation].append(info["submitted_time"])
98
-
99
- return set(file_names), users_to_submission_dates
 
src/submission/submit.py DELETED
@@ -1,119 +0,0 @@
1
- import json
2
- import os
3
- from datetime import datetime, timezone
4
-
5
- from src.display.formatting import styled_error, styled_message, styled_warning
6
- from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
7
- from src.submission.check_validity import (
8
- already_submitted_models,
9
- check_model_card,
10
- get_model_size,
11
- is_model_on_hub,
12
- )
13
-
14
- REQUESTED_MODELS = None
15
- USERS_TO_SUBMISSION_DATES = None
16
-
17
- def add_new_eval(
18
- model: str,
19
- base_model: str,
20
- revision: str,
21
- precision: str,
22
- weight_type: str,
23
- model_type: str,
24
- ):
25
- global REQUESTED_MODELS
26
- global USERS_TO_SUBMISSION_DATES
27
- if not REQUESTED_MODELS:
28
- REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
29
-
30
- user_name = ""
31
- model_path = model
32
- if "/" in model:
33
- user_name = model.split("/")[0]
34
- model_path = model.split("/")[1]
35
-
36
- precision = precision.split(" ")[0]
37
- current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
38
-
39
- if model_type is None or model_type == "":
40
- return styled_error("Please select a model type.")
41
-
42
- # Does the model actually exist?
43
- if revision == "":
44
- revision = "main"
45
-
46
- # Is the model on the hub?
47
- if weight_type in ["Delta", "Adapter"]:
48
- base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
49
- if not base_model_on_hub:
50
- return styled_error(f'Base model "{base_model}" {error}')
51
-
52
- if not weight_type == "Adapter":
53
- model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
54
- if not model_on_hub:
55
- return styled_error(f'Model "{model}" {error}')
56
-
57
- # Is the model info correctly filled?
58
- try:
59
- model_info = API.model_info(repo_id=model, revision=revision)
60
- except Exception:
61
- return styled_error("Could not get your model information. Please fill it up properly.")
62
-
63
- model_size = get_model_size(model_info=model_info, precision=precision)
64
-
65
- # Were the model card and license filled?
66
- try:
67
- license = model_info.cardData["license"]
68
- except Exception:
69
- return styled_error("Please select a license for your model")
70
-
71
- modelcard_OK, error_msg = check_model_card(model)
72
- if not modelcard_OK:
73
- return styled_error(error_msg)
74
-
75
- # Seems good, creating the eval
76
- print("Adding new eval")
77
-
78
- eval_entry = {
79
- "model": model,
80
- "base_model": base_model,
81
- "revision": revision,
82
- "precision": precision,
83
- "weight_type": weight_type,
84
- "status": "PENDING",
85
- "submitted_time": current_time,
86
- "model_type": model_type,
87
- "likes": model_info.likes,
88
- "params": model_size,
89
- "license": license,
90
- "private": False,
91
- }
92
-
93
- # Check for duplicate submission
94
- if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
95
- return styled_warning("This model has been already submitted.")
96
-
97
- print("Creating eval file")
98
- OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
99
- os.makedirs(OUT_DIR, exist_ok=True)
100
- out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
101
-
102
- with open(out_path, "w") as f:
103
- f.write(json.dumps(eval_entry))
104
-
105
- print("Uploading eval file")
106
- API.upload_file(
107
- path_or_fileobj=out_path,
108
- path_in_repo=out_path.split("eval-queue/")[1],
109
- repo_id=QUEUE_REPO,
110
- repo_type="dataset",
111
- commit_message=f"Add {model} to eval queue",
112
- )
113
-
114
- # Remove the local file
115
- os.remove(out_path)
116
-
117
- return styled_message(
118
- "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
119
- )
 
utils.py ADDED
@@ -0,0 +1,170 @@
1
+ import pandas as pd
2
+ import gradio as gr
3
+ import csv
4
+ import json
5
+ import os
6
+ import requests
7
+ import io
8
+ import shutil
9
+ from huggingface_hub import Repository
10
+
11
+ HF_TOKEN = os.environ.get("HF_TOKEN")
12
+
13
+ BASE_COLS = ["Rank", "Models", "Model Size(B)", "Type", "Frames"]
14
+
15
+ TASKS = ["LP_Open", "LP_MCQ", "LR_Open", "LR_MCQ", "HP_Open", "HP_MCQ", "HR_Open", "HR_MCQ", "Overall_Open", "Overall_MCQ"]
16
+ OPEN_TASKS = ["LP_Open", "LR_Open", "HP_Open", "HR_Open", "Overall_Open"]
17
+ MCQ_TASKS = ["LP_MCQ", "LR_MCQ", "HP_MCQ", "HR_MCQ", "Overall_MCQ"]
18
+
19
+ DEFAULT_NAMES = BASE_COLS + OPEN_TASKS
20
+ COLUMN_NAMES = BASE_COLS + TASKS
21
+
22
+ GROUP_FIELD = "Type" # "Proprietary" or "Open-source"
23
+
24
+ DATA_TITLE_TYPE = ['number', 'markdown', 'str', 'number', 'str', 'str'] + \
25
+ ['number'] * len(TASKS)
26
+
27
+ LEADERBOARD_INTRODUCTION = """
28
+ # 🥇 **VideoEval-Pro Leaderboard**
29
+ ### A More Robust and Realistic QA Evaluation Benchmark for Multimodal LLMs in Long Video Understanding
30
+ ## Introduction
31
+ Do existing long video benchmarks faithfully reflect a model's real capacity to understand long video content? Do the gains reported by newer models genuinely translate into stronger long video comprehension, or are they illusory? To probe these questions, we present VideoEval-Pro, a more robust and realistic long video understanding benchmark consisting of open-ended, short-answer QA problems. To construct VideoEval-Pro, we source questions from four existing long video understanding MCQ benchmarks and reformulate them as free-form questions. We apply a series of filters based on video duration, question and answer type, answerability, and QA difficulty to ensure the quality of the benchmark. The final benchmark contains 1,289 short-answer questions based on 465 videos, with an average duration of 38 minutes. \n
32
+ | [**📈Overview**](https://tiger-ai-lab.github.io/VideoEval-Pro)
33
+ | [**👨‍💻Github**](https://github.com/TIGER-AI-Lab/VideoEval-Pro)
34
+ | [**📖VideoEval-Pro Paper**](https://arxiv.org/abs/2505.14640)
35
+ | [**🤗HuggingFace**](https://huggingface.co/datasets/TIGER-Lab/VideoEval-Pro) |
36
+ """
37
+
38
+ TABLE_INTRODUCTION = """Models are ranked based on Overall_Open."""
39
+
40
+ LEADERBOARD_INFO = """
41
+ ## Dataset Statistics and Tasks Info
42
+ * Local Perception (LP): LP focuses on identifying and retrieving visual elements or actions from a short clip within a long video. Subtypes in this category include Segment QA, Needle-In-A-Haystack (NIAH) QA, Attribute Perception, Action Recognition, Object Recognition, Entity Recognition, Key Information Retrieval, and a combined Other subtype.
43
+ * Local Reasoning (LR): LR focuses on reasoning within short temporal windows, such as inferring causality, temporal order, or changes that happen over a local sequence of events. The four subtypes in this category are Egocentric Video Reasoning, Object Reasoning, Temporal Reasoning and Action Reasoning.
44
+ * Holistic Perception (HP): HP involves a global, holistic understanding of statistical, structural, or spatial information, typically requiring visual aggregation. In VideoEval-Pro, HP consists of Visual Counting problems.
45
+ * Holistic Reasoning (HR): HR requires abstract or high-level understanding of long videos across events or scenes, often involving narrative or intent understanding. The two subtypes for HR are Event Understanding and Plot Reasoning.
46
+ """
47
+
48
+ CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
49
+ CITATION_BUTTON_TEXT = r"""@misc{ma2025videoevalprorobustrealisticlong,
50
+ title={VideoEval-Pro: Robust and Realistic Long Video Understanding Evaluation},
51
+ author={Wentao Ma and Weiming Ren and Yiming Jia and Zhuofeng Li and Ping Nie and Ge Zhang and Wenhu Chen},
52
+ year={2025},
53
+ eprint={2505.14640},
54
+ archivePrefix={arXiv},
55
+ primaryClass={cs.CV},
56
+ url={https://arxiv.org/abs/2505.14640},
57
+ }"""
58
+
59
+ SUBMIT_INTRODUCTION = """# Submitting to the VideoEval-Pro Leaderboard
60
+ ## The evaluation (judge) model used should be *GPT-4o-0806*
61
+ ## ⚠ Please note that you need to submit the JSON file with the following format:
62
+ ```json
63
+ [
64
+ {
65
+ "Models": "<Model Name>",
66
+ "Model Size(B)": "100 or -",
67
+ "Frames": "<Number of Frames>",
68
+ "Type": "Proprietary or Open-source",
69
+ "URL": "<Model URL>" or null,
70
+ "LP_Open": 50.0 or null,
71
+ "LP_MCQ": 50.0 or null,
72
+ "LR_Open": 50.0 or null,
73
+ "LR_MCQ": 50.0 or null,
74
+ "HP_Open": 50.0 or null,
75
+ "HP_MCQ": 50.0 or null,
76
+ "HR_Open": 50.0 or null,
77
+ "HR_MCQ": 50.0 or null,
78
+ "Overall_Open": 50.0,
79
+ "Overall_MCQ": 50.0,
80
+ }
81
+ ]
82
+ ```
83
+
84
+ You may refer to the [**GitHub page**](https://github.com/TIGER-AI-Lab/VideoEval-Pro) for instructions on evaluating your model. \n
85
+ Please send us an email at tonyyyma@gmail.com, attaching the JSON file. We will review your submission and update the leaderboard accordingly.
86
+ """
87
+
88
+ def create_hyperlinked_names(df):
89
+ def convert_url(url, model_name):
90
+ return f'<a href="{url}">{model_name}</a>' if url is not None else model_name
91
+
92
+ def add_link_to_model_name(row):
93
+ row['Models'] = convert_url(row['URL'], row['Models'])
94
+ return row
95
+
96
+ df = df.copy()
97
+ df = df.apply(add_link_to_model_name, axis=1)
98
+ return df
99
+
100
+ # def fetch_data(file: str) -> pd.DataFrame:
101
+ # # fetch the leaderboard data from remote
102
+ # if file is None:
103
+ # raise ValueError("URL Not Provided")
104
+ # url = f"https://huggingface.co/spaces/TIGER-Lab/MMEB/resolve/main/{file}"
105
+ # print(f"Fetching data from {url}")
106
+ # response = requests.get(url)
107
+ # if response.status_code != 200:
108
+ # raise requests.HTTPError(f"Failed to fetch data: HTTP status code {response.status_code}")
109
+ # return pd.read_json(io.StringIO(response.text), orient='records', lines=True)
110
+
111
+ def get_df(file="results.jsonl"):
112
+ df = pd.read_json(file, orient='records', lines=True)
113
+ df['Model Size(B)'] = df['Model Size(B)'].apply(process_model_size)
114
+ for task in TASKS:
115
+ if df[task].isnull().any():
116
+ df[task] = df[task].apply(lambda score: '-' if pd.isna(score) else score)
117
+ df = df.sort_values(by=['Overall_Open'], ascending=False)
118
+ df = create_hyperlinked_names(df)
119
+ df['Rank'] = range(1, len(df) + 1)
120
+ return df
121
+
122
+ def refresh_data():
123
+ df = get_df()
124
+ return df[DEFAULT_NAMES]
125
+
126
+ def search_and_filter_models(df, query, min_size, max_size):
127
+ filtered_df = df.copy()
128
+
129
+ if query:
130
+ filtered_df = filtered_df[filtered_df['Models'].str.contains(query, case=False, na=False)]
131
+
132
+ size_mask = filtered_df['Model Size(B)'].apply(lambda x:
133
+ (min_size <= max_size) if x == 'unknown'
134
+ else (min_size <= x <= max_size))
135
+
136
+ filtered_df = filtered_df[size_mask]
137
+
138
+ return filtered_df[COLUMN_NAMES]
139
+
140
+
141
+ def search_models(df, query):
142
+ if query:
143
+ return df[df['Models'].str.contains(query, case=False, na=False)]
144
+ return df
145
+
146
+ def get_size_range(df):
147
+ sizes = df['Model Size(B)'].apply(lambda x: 0.0 if x == 'unknown' else x)
148
+ if (sizes == 0.0).all():
149
+ return 0.0, 1000.0
150
+ return float(sizes.min()), float(sizes.max())
151
+
152
+
153
+ def process_model_size(size):
154
+ if pd.isna(size) or size == 'unk':
155
+ return 'unknown'
156
+ try:
157
+ val = float(size)
158
+ return val
159
+ except (ValueError, TypeError):
160
+ return 'unknown'
161
+
162
+ def filter_columns_by_tasks(df, selected_tasks=None):
163
+ if selected_tasks is None or len(selected_tasks) == 0:
164
+ return df[COLUMN_NAMES]
165
+
166
+ base_columns = ['Models', 'Model Size(B)', 'Frames', 'Type', 'Overall_Open']
167
+ selected_columns = base_columns + selected_tasks
168
+
169
+ available_columns = [col for col in selected_columns if col in df.columns]
170
+ return df[available_columns]