# NOTE(review): the six lines below this comment in the original paste were
# web-page scrape residue (Hugging Face "Spaces: Running" status, file size,
# a git-blame hash gutter, and a 1..127 line-number gutter). They are not
# Python and broke parsing; commented out here pending removal:
# Spaces: Running / Running — File size: 5,270 Bytes
# 605b3ec a677598 ... (git blame hash gutter)
# 1 2 3 ... 127 (line-number gutter)
import os
import gradio as gr
from gradio_modal import Modal
from content import HEADER_MARKDOWN, LEADERBOARD_TAB_TITLE_MARKDOWN, SUBMISSION_TAB_TITLE_MARKDOWN, \
ADDITIONAL_NOTES_MARKDOWN, LEADERBOARD_CSS
from leaderboard_server import LeaderboardServer
# Initialize server and task list
# Root directory holding the leaderboard data; None if DATA_ROOT is unset.
DATA_ROOT = os.environ.get("DATA_ROOT")
server = LeaderboardServer(local_leaderboard_path=DATA_ROOT)
# Task names come from the server's metadata; TASKS[0] is used as the default
# selection throughout the UI below.
TASKS = list(server.tasks_metadata.keys())
# Shared secret required to submit; None if SUBMISSION_TOKEN is unset
# (which would reject every submission in submit_model).
EXPECTED_TOKEN = os.environ.get("SUBMISSION_TOKEN")
def get_datasets_for_task(task):
    """Return dataset names for *task*: the ``*.json`` files (extension
    stripped) under ``references/<task>``, or ``[]`` if that directory
    does not exist.
    """
    path = os.path.join("references", task)
    if not os.path.exists(path):
        return []
    # Bug fix: str.replace(".json", "") removed EVERY occurrence of ".json"
    # in a filename (e.g. "a.json.bak.json"); removesuffix only strips the
    # trailing extension we filtered on.
    return [f.removesuffix(".json") for f in os.listdir(path) if f.endswith(".json")]
def update_datasets(task):
    """Gradio change-handler: rebuild the dataset CheckboxGroup for *task*,
    with every available dataset pre-selected.
    """
    # Fix: the original called get_datasets_for_task twice, scanning the
    # references directory twice per dropdown change; compute it once.
    datasets = get_datasets_for_task(task)
    return gr.CheckboxGroup(choices=datasets, value=datasets)
def submit_model(task, datasets, hyp_file, submitted_by, model_id, model_link_input, token, normalize):
    """Validate a leaderboard submission and hand it to the server.

    Parameters mirror the Gradio inputs: the selected task, the chosen
    dataset names, the uploaded hypothesis file, submitter name, model
    id and link, the shared submission token, and the normalization flag.

    Returns a gr.update(...) for the feedback textbox; server-side errors
    are caught and reported as feedback text rather than raised.
    """
    # NOTE(review): the status glyphs in these messages ("β οΈ", "β") look
    # mojibake-encoded (likely ⚠️ / ❌ / ✅ originally) — confirm encoding
    # against the canonical source before changing them.
    # Guard clauses: reject incomplete submissions before any server work.
    if not hyp_file:
        return gr.update(visible=True, value="β οΈ Please upload a hypothesis file.")
    if not submitted_by.strip() or not model_id.strip() or not model_link_input.strip() or not token.strip():
        return gr.update(visible=True, value="β οΈ All fields are required.")
    # NOTE(review): if SUBMISSION_TOKEN is unset, EXPECTED_TOKEN is None and
    # this comparison rejects every submission — confirm that is intended.
    if token.strip() != EXPECTED_TOKEN:
        return gr.update(visible=True, value="β Invalid submission token.")
    metadata = {
        "submitted_by": submitted_by.strip(),
        "model_id": model_id.strip(),
        "model_link": model_link_input.strip(),
        "normalize": normalize  # Include normalization info in metadata if needed
    }
    # Duplicate-submission check; the rejection itself is disabled below.
    leaderboard_df = server.get_leaderboard(task)
    if len(leaderboard_df) > 0:
        existing = leaderboard_df[
            (leaderboard_df["Submitted by"] == submitted_by.strip()) &
            (leaderboard_df["Model ID"] == model_id.strip())
        ]
        # TODO: Uncomment this after testing done
        # if not existing.empty:
        #     return gr.update(value="β A submission with this name and model ID already exists.", visible=True)
    try:
        server.prepare_model_for_submission(
            hyp_file.name, metadata, task, datasets, normalize=normalize
        )
        # Bug fix: this success message was split across two physical lines
        # inside a plain string literal (a syntax error) — rejoined here.
        return gr.update(visible=True, value="β Submission successful!")
    except Exception as e:  # broad by design: surface any server error to the user
        print(e)
        return gr.update(visible=True, value=f"β Error: {str(e)}")
def get_leaderboard_df(task):
    """Return the current leaderboard table for *task* — a thin wrapper
    around the module-level LeaderboardServer instance, used both as the
    Dataframe's value callable and as the dropdown change handler."""
    return server.get_leaderboard(task)
# Gradio UI: one Blocks app with two tabs — a leaderboard view and a
# submission form.
# NOTE(review): indentation was lost in the source paste; the nesting below
# is reconstructed from Gradio context-manager conventions — verify against
# the original file.
with gr.Blocks(css=LEADERBOARD_CSS) as demo:
    gr.Markdown(HEADER_MARKDOWN)
    with gr.Tabs(selected=0) as tabs:
        with gr.Tab("π Leaderboard"):
            gr.Markdown(LEADERBOARD_TAB_TITLE_MARKDOWN)
            leaderboard_task_dropdown = gr.Dropdown(
                choices=TASKS, value=TASKS[0], label="Select Task for Leaderboard"
            )
            leaderboard_output = gr.components.Dataframe(
                datatype=["markdown", "markdown", "float", "float",
                          "float", "float", "float", "float"],
                value=lambda: get_leaderboard_df(TASKS[0]),
                interactive=False,
                label="Leaderboard",
            )
            # Refresh the table whenever a different task is selected.
            leaderboard_task_dropdown.change(
                fn=get_leaderboard_df,
                inputs=leaderboard_task_dropdown,
                outputs=leaderboard_output,
            )
        with gr.Tab("π€ Submit"):
            gr.Markdown(SUBMISSION_TAB_TITLE_MARKDOWN)
            with gr.Row():
                task_dropdown = gr.Dropdown(choices=TASKS, value=TASKS[0], label="Select Task")
                dataset_checkboxes = gr.CheckboxGroup(
                    choices=get_datasets_for_task(TASKS[0]),
                    label="Select Datasets",
                    value=get_datasets_for_task(TASKS[0]),
                )
            # Keep the dataset checkboxes in sync with the selected task.
            task_dropdown.change(fn=update_datasets, inputs=task_dropdown, outputs=dataset_checkboxes)
            with gr.Row():
                submitted_by_input = gr.Text(label="Submitted by")
                model_id_input = gr.Text(label="Model Identifier")
                model_link_input = gr.Text(label="Model Link", placeholder="Link to model or code repository")
                token_input = gr.Text(label="Submission Token", type="password")
            hyp_file_upload = gr.File(label="Upload Hypothesis JSON", file_types=[".json"])
            normalize_checkbox = gr.Checkbox(label="Apply Chime-8 Normalization", value=True)
            submit_btn = gr.Button("Submit")
            # Modal shown while the submission is processed; feedback_text is
            # overwritten by submit_model's gr.update(...) return value.
            with Modal("Submission Feedback", visible=False) as loading_msg:
                feedback_text = gr.Text(visible=True, label="β³ Processing your submission...")
            gr.Markdown(ADDITIONAL_NOTES_MARKDOWN)
            # Event chain: show the modal, run the submission, then refresh
            # the leaderboard table for the currently selected task.
            submit_btn.click(
                lambda: gr.update(visible=True),  # Show loading
                outputs=loading_msg,
            ).then(
                fn=submit_model,
                inputs=[task_dropdown, dataset_checkboxes, hyp_file_upload,
                        submitted_by_input, model_id_input, model_link_input,
                        token_input, normalize_checkbox],
                outputs=[feedback_text],
            ).then(
                fn=lambda task: get_leaderboard_df(task),
                inputs=task_dropdown,
                outputs=leaderboard_output,
            )
# Script entry point: launch the Gradio app only when run directly
# (not when imported, e.g. by the Spaces runtime).
if __name__ == "__main__":
    demo.launch()
# (removed stray "|" scrape artifact)