tohid.abedini committed
Commit 54fdeab · 1 Parent(s): b24fdd3

[Add] new files

Files changed (6):
  1. README.md +6 -5
  2. app.py +178 -0
  3. envs.py +25 -0
  4. leaderboard_pinder.json +1 -0
  5. leaderboard_plinder.json +1 -0
  6. requirements.txt +1 -0
README.md CHANGED
@@ -1,12 +1,13 @@
 ---
-title: Llm Leaderboard 2
-emoji: 🏃
-colorFrom: pink
-colorTo: green
+title: LLM Leaderboard
+emoji: 🏅
+colorFrom: red
+colorTo: red
 sdk: gradio
-sdk_version: 5.5.0
+sdk_version: 4.42.0
 app_file: app.py
 pinned: false
+license: mit
 short_description: Part LLM Leaderboard
 ---
app.py ADDED
@@ -0,0 +1,178 @@
import json
import os
from pathlib import Path

import gradio as gr
import pandas as pd
import requests
from gradio_leaderboard import Leaderboard

from envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO


def fill_form(model_name, model_id, contact_email, challenge, submission_id, paper_link, architecture, license):
    # Map the submission fields to the entry IDs of the Google Form.
    return {
        # Model name
        "entry.1591601824": model_name,
        # username/space
        "entry.1171388028": model_id,
        # Submission ID on CMT
        "entry.171528970": submission_id,
        # Preprint or paper link
        "entry.1284338508": paper_link,
        # Model architecture
        "entry.1291571256": architecture,
        # License (free text)
        "entry.272554778": license,
        # Challenge (free text)
        "entry.1908975677": challenge,
        # Email (free text)
        "entry.964644151": contact_email,
    }


def send_form(url, data):
    # POST the pre-filled data to the Google Form endpoint.
    try:
        response = requests.post(url, data=data, timeout=30)
        response.raise_for_status()
        print("Submitted successfully!")
    except requests.RequestException as e:
        print(f"Error submitting the form: {e}")


def submit(model_name, model_id, contact_email, challenge, submission_id, paper_link, architecture, license):
    # Validate the form inputs; gr.Error must be raised to surface in the UI.
    if model_name == "" or model_id == "" or challenge == "" or architecture == "" or license == "":
        raise gr.Error("Please fill in all the fields.")
    if submission_id == "" and paper_link == "":
        raise gr.Error("Provide either a link to a paper describing the method or a submission ID for the MLSB workshop.")
    if "/" not in model_id:
        raise gr.Error("The space must be given as username/space, e.g. mlsb/alphafold3.")

    user_name, model_path = model_id.split("/", 1)

    try:
        # Write the evaluation request to a local JSON file.
        eval_entry = {
            "model_name": model_name,
            "model_id": model_id,
            "challenge": challenge,
            "submission_id": submission_id,
            "architecture": architecture,
            "license": license,
        }
        out_dir = f"{EVAL_REQUESTS_PATH}/{user_name}"
        os.makedirs(out_dir, exist_ok=True)
        out_path = f"{out_dir}/{user_name}_{model_path}.json"
        with open(out_path, "w") as f:
            json.dump(eval_entry, f)

        print("Sending form")
        form_data = fill_form(model_name, model_id, contact_email, challenge, submission_id, paper_link, architecture, license)
        send_form("https://docs.google.com/forms/d/e/1FAIpQLSf1zP7RAFC5RLlva03xm0eIAPLKXOmMvNUzirbm82kdCUFKNw/formResponse", form_data)

        print("Uploading eval file")
        API.upload_file(
            path_or_fileobj=out_path,
            path_in_repo=out_path.split("eval-queue/")[1],
            repo_id=QUEUE_REPO,
            repo_type="dataset",
            commit_message=f"Add {model_name} to eval queue",
        )

        gr.Info("Successfully submitted", duration=10)
        # Remove the local file once it has been uploaded.
        os.remove(out_path)
    except Exception as e:
        raise gr.Error(f"Error submitting the model: {e}")


abs_path = Path(__file__).parent

# Any pandas-compatible data
pinder_df = pd.read_json(str(abs_path / "leaderboard_pinder.json"))
plinder_df = pd.read_json(str(abs_path / "leaderboard_plinder.json"))

with gr.Blocks() as demo:
    gr.Markdown("""
    # MLSB 2024 Leaderboard
    """)

    with gr.Tab("🎖️ PINDER Leaderboard"):
        gr.Markdown("""## PINDER Leaderboard
        Evaluating protein-protein interaction prediction
        """)
        Leaderboard(
            value=pinder_df,
            select_columns=["Arch", "Model", "L_rms", "I_rms",
                            "F_nat", "DOCKQ", "CAPRI"],
            search_columns=["model_name_for_query"],
            hide_columns=["model_name_for_query"],
            filter_columns=["Arch"],
        )
    with gr.Tab("🥇 PLINDER Leaderboard"):
        gr.Markdown("""## PLINDER Leaderboard
        Evaluating protein-ligand prediction
        """)
        Leaderboard(
            value=plinder_df,
            select_columns=["Arch", "Model", "Mean lDDT-PLI", "Mean lDDT-LP",
                            "Median RMSD", "% lDDT-PLI >= 0.5", "% pass PoseBusters"],
            search_columns=["model_name_for_query"],
            hide_columns=["model_name_for_query"],
            filter_columns=["Arch"],
        )
    with gr.Tab("✉️ Submit"):
        gr.Markdown("""## Submit your model
        Submit your model to the leaderboard using the form below AFTER completing the following steps:
        - Create a Hugging Face account and request to join the [MLSB organization](https://huggingface.co/MLSB).
        - Create a new space in the MLSB organization and add your model using the inference templates: https://huggingface.co/new-space?owner=MLSB
        - Fill in the submission form.

        ## Prerequisites
        To qualify for submission, each team must:
        - Provide an MLSB submission ID (found on CMT) or a link to a preprint/paper describing their methodology. The publication does not have to specifically report training or evaluation on the P(L)INDER dataset; previously published methods, such as DiffDock, only need to link their existing paper. Note that entering this competition is not the same as submitting a paper to the MLSB workshop.
        - Create a copy of the provided [inference templates](https://huggingface.co/MLSB/):
            - Go to the top right corner of the respective inference template's page, click the drop-down menu (vertical ellipsis) next to "Community", and select "Duplicate this space".
        - Change the files in the newly created space to reflect the specifics of your model:
            - Edit `requirements.txt` to capture all Python dependencies.
            - Modify the Dockerfile as appropriate (including selecting the right base image).
            - Include an `inference_app.py` file. It contains a `predict` function that should be modified to reflect the specifics of inference with your model.
            - Include a `train.py` file to ensure that training and model selection use only the PINDER/PLINDER datasets and to clearly show any additional hyperparameters used.
        - Provide a LICENSE file that allows for reuse, derivative works, and distribution of the provided software and weights (e.g., an MIT or Apache 2.0 license).
        - Submit to the leaderboard via the [form below](https://huggingface.co/spaces/MLSB/leaderboard2024).
            - On the submission page, reference the newly created space in the format username/space (e.g. mlsb/alphafold3). You can create the space on your personal Hugging Face account and transfer it to MLSB for the submission to get a GPU assigned.

        After a brief technical review by our organizers, we will grant you a free GPU until MLSB so that anyone can play with the model, and we will run the evaluation.

        If you have any questions, please email: workshopmlsb@gmail.com
        """)
        model_name = gr.Textbox(label="Model name")
        model_id = gr.Textbox(label="username/space, e.g. mlsb/alphafold3")
        contact_email = gr.Textbox(label="Contact e-mail")
        challenge = gr.Radio(choices=["PINDER", "PLINDER"], label="Challenge")
        gr.Markdown("Give a submission ID if you submitted to the MLSB workshop, or provide a link to the preprint/paper describing the method.")
        with gr.Row():
            submission_id = gr.Textbox(label="Submission ID on CMT")
            paper_link = gr.Textbox(label="Preprint or paper link")
        architecture = gr.Dropdown(choices=["GNN", "CNN", "Diffusion Model", "Physics-based", "Other"], label="Model architecture")
        license = gr.Dropdown(choices=["mit", "apache-2.0", "gplv2", "gplv3", "lgpl", "mozilla", "bsd", "other"], label="License")
        submit_btn = gr.Button("Submit")

        submit_btn.click(submit, inputs=[model_name, model_id, contact_email, challenge, submission_id, paper_link, architecture, license], outputs=[])

    gr.Markdown("""
    Please find more information about the challenges at [mlsb.io/#challenge](https://mlsb.io/#challenge).""")

if __name__ == "__main__":
    demo.launch()
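
The Submit tab above asks each team for an `inference_app.py` exposing a `predict` function. As a rough sketch of what such a file might look like — the function signature, input/output file formats, and Gradio wiring below are assumptions for illustration; the actual MLSB inference templates define the real interface:

```python
# Hypothetical sketch of an inference_app.py for a duplicated template space.
# The predict signature and file formats are assumptions, not the template's API.
import gradio as gr

def predict(protein_file: str, ligand_file: str) -> str:
    # Replace this stub with your model's inference code: read the input
    # structure files, run the model, and write the predicted complex to disk.
    output_path = "prediction.pdb"
    # ... run your model here and write output_path ...
    return output_path

demo = gr.Interface(
    fn=predict,
    inputs=[gr.File(label="Protein (PDB)"), gr.File(label="Ligand (SDF)")],
    outputs=gr.File(label="Predicted complex"),
)

if __name__ == "__main__":
    demo.launch()
```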
envs.py ADDED
@@ -0,0 +1,25 @@
import os

from huggingface_hub import HfApi

# Info to change for your repository
# ----------------------------------
TOKEN = os.environ.get("HF_TOKEN")  # A read/write token for your org

OWNER = "MLSB"  # Change to your org - don't forget to create the requests and results datasets, with the correct format!
# ----------------------------------

REPO_ID = f"{OWNER}/leaderboard2024"
QUEUE_REPO = f"{OWNER}/requests"
RESULTS_REPO = f"{OWNER}/results"

# If you set up a cache later, just change HF_HOME
CACHE_PATH = os.getenv("HF_HOME", ".")

# Local caches
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")

API = HfApi(token=TOKEN)
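
Since `app.py` uploads to `QUEUE_REPO` through this `API` object, a quick sanity check before launching is to confirm the token resolves to the expected account. `whoami` is a standard `HfApi` method; the snippet below is only a suggested check, not part of the commit:

```python
# Minimal sketch: verify HF_TOKEN is set and valid before launching the app.
from envs import API, TOKEN

if TOKEN is None:
    raise SystemExit("HF_TOKEN is not set; uploads to the queue repo will fail.")

# whoami() raises if the token is invalid; otherwise it returns account info.
print(API.whoami()["name"])
```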
leaderboard_pinder.json ADDED
@@ -0,0 +1 @@
{"Arch":{"0":"GNN"},"Model":{"0":"example/example"},"L_rms":{"0":81.22},"I_rms":{"0":79.78},"F_nat":{"0":91.15},"DOCKQ":{"0":77.95},"CAPRI":{"0":74.5},"Runtime":{"0":"2 +-0.2"},"Hub License":{"0":"apache-2.0"},"#Params (B)":{"0":72.29},"Model sha":{"0":"fda5cf998a0f2d89b53b5fa490793e3e50bb8239"},"model_name_for_query":{"0":"example/example"}}
leaderboard_plinder.json ADDED
@@ -0,0 +1 @@
{"Arch":{"0":"GNN"},"Model":{"0":"example/example"},"Mean lDDT-PLI":{"0":0.2},"Mean lDDT-LP":{"0":0.8},"Median RMSD":{"0":5.78},"% lDDT-PLI >= 0.5":{"0":20.78},"% pass PoseBusters":{"0":58.47},"Runtime":{"0":"2 +-0.2"},"Hub License":{"0":"apache-2.0"},"#Params (B)":{"0":72.29},"Model sha":{"0":"fda5cf998a0f2d89b53b5fa490793e3e50bb8239"},"model_name_for_query":{"0":"example/example"}}
requirements.txt ADDED
@@ -0,0 +1 @@
gradio_leaderboard