Simon Dürr committed
Commit 4060d6d · 1 Parent(s): b9e3a1d
add leaderboard
Browse files
- app.py +107 -0
- envs.py +25 -0
- leaderboard_data.json +1 -0
app.py ADDED
@@ -0,0 +1,107 @@
import gradio as gr
from gradio_leaderboard import Leaderboard
from pathlib import Path
import pandas as pd

import os
import json

from envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO


def submit(model_name, model_id, challenge, submission_id, architecture, license):
    # Reject incomplete submissions up front.
    if model_name == "" or model_id == "" or challenge == "" or submission_id == "" or architecture == "" or license == "":
        raise gr.Error("Please fill in all the fields")
    try:
        user_name = ""
        model_path = model_id  # fallback when the id has no "username/" prefix
        if "/" in model_id:
            user_name = model_id.split("/")[0]
            model_path = model_id.split("/")[1]

        eval_entry = {
            "model_name": model_name,
            "model_id": model_id,
            "challenge": challenge,
            "submission_id": submission_id,
            "architecture": architecture,
            "license": license,
        }

        # Write the request locally, then push it to the eval queue dataset.
        OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
        os.makedirs(OUT_DIR, exist_ok=True)
        out_path = f"{OUT_DIR}/{user_name}_{model_path}.json"

        with open(out_path, "w") as f:
            f.write(json.dumps(eval_entry))

        print("Uploading eval file")
        API.upload_file(
            path_or_fileobj=out_path,
            path_in_repo=out_path.split("eval-queue/")[1],
            repo_id=QUEUE_REPO,
            repo_type="dataset",
            commit_message=f"Add {model_name} to eval queue",
        )
        gr.Info("Successfully submitted", duration=10)
        # Remove the local file
        os.remove(out_path)
    except Exception:
        raise gr.Error("Error submitting the model")


abs_path = Path(__file__).parent

# Any pandas-compatible data
df = pd.read_json(str(abs_path / "leaderboard_data.json"))

with gr.Blocks() as demo:
    gr.Markdown("""
    # MLSB 2024 Challenges
    """)

    with gr.Tab("🎖️ PINDER Leaderboard"):
        gr.Markdown("""## PINDER Leaderboard
        Evaluating Protein-Protein interaction prediction
        """)
        Leaderboard(
            value=df,
            select_columns=["Arch", "Model", "L_rms", "I_rms",
                            "F_nat", "DOCKQ", "CAPRI"],
            search_columns=["model_name_for_query"],
            hide_columns=["model_name_for_query"],
            filter_columns=["Arch"],
        )
    with gr.Tab("🥇 PLINDER Leaderboard"):
        gr.Markdown("""## PLINDER Leaderboard
        Evaluating Protein-Ligand prediction
        """)
        Leaderboard(
            value=df,
            select_columns=["Arch", "Model", "L_rms", "I_rms",
                            "F_nat", "DOCKQ", "CAPRI"],
            search_columns=["model_name_for_query"],
            hide_columns=["model_name_for_query"],
            filter_columns=["Arch"],
        )
    with gr.Tab("✉️ Submit"):
        gr.Markdown("""## Submit your model
        Submit your model to the leaderboard
        """)
        model_name = gr.Textbox(label="Model name")
        model_id = gr.Textbox(label="username/space, e.g. mlsb/alphafold3")
        challenge = gr.Radio(choices=["PINDER", "PLINDER"], label="Challenge")
        submission_id = gr.Textbox(label="Submission ID on CMT")
        architecture = gr.Dropdown(choices=["GNN", "CNN", "Physics-based", "Other"], label="Model architecture")
        license = gr.Dropdown(choices=["mit", "apache-2.0", "gplv2", "gplv3", "lgpl", "mozilla", "bsd", "other"], label="License")
        submit_btn = gr.Button("Submit")

        submit_btn.click(submit, inputs=[model_name, model_id, challenge, submission_id, architecture, license], outputs=[])

if __name__ == "__main__":
    demo.launch()
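For orientation, below is a hedged sketch of the eval-request file that submit() writes and then uploads to the requests dataset. The field values are hypothetical; only the keys and the path layout follow from the code above.

# Hypothetical submission with model_id = "mlsb/alphafold3" (illustrative only).
# Local file written by submit():  eval-queue/mlsb/mlsb_alphafold3.json
# Path in the QUEUE_REPO dataset:  mlsb/mlsb_alphafold3.json
eval_entry = {
    "model_name": "AlphaFold3 baseline",  # hypothetical
    "model_id": "mlsb/alphafold3",        # hypothetical
    "challenge": "PINDER",
    "submission_id": "42",                # hypothetical CMT ID
    "architecture": "GNN",
    "license": "apache-2.0",
}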
envs.py ADDED
@@ -0,0 +1,25 @@
import os

from huggingface_hub import HfApi

# Info to change for your repository
# ----------------------------------
TOKEN = os.environ.get("HF_TOKEN")  # A read/write token for your org

OWNER = "MLSB"  # Change to your org - don't forget to create a results and request dataset, with the correct format!
# ----------------------------------

REPO_ID = f"{OWNER}/leaderboard2024"
QUEUE_REPO = f"{OWNER}/requests"
RESULTS_REPO = f"{OWNER}/results"

# If you set up a cache later, just change HF_HOME
CACHE_PATH = os.getenv("HF_HOME", ".")

# Local caches
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")

API = HfApi(token=TOKEN)
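Because envs.py reads HF_TOKEN from the environment at import time, and API.upload_file() needs write access to the requests dataset, the token has to be available before app.py is imported. A minimal local-run sketch, assuming the gradio_leaderboard package is installed; the token value and the helper file name are placeholders, not part of the commit:

# run_local.py - hypothetical helper for launching the Space locally.
import os

os.environ.setdefault("HF_TOKEN", "hf_xxx")  # placeholder token, set your own

from app import demo  # importing app.py builds the Blocks UI without launching it

demo.launch()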
leaderboard_data.json ADDED
@@ -0,0 +1 @@
{"Arch":{"0":"GNN"},"Model":{"0":"davidkim205/Rhea-72b-v0.5"},"L_rms":{"0":81.22},"I_rms":{"0":79.78},"F_nat":{"0":91.15},"DOCKQ":{"0":77.95},"CAPRI":{"0":74.5},"Runtime":{"0":"2 +-0.2"},"Hub License":{"0":"apache-2.0"},"#Params (B)":{"0":72.29},"Model sha":{"0":"fda5cf998a0f2d89b53b5fa490793e3e50bb8239"},"model_name_for_query":{"0":"davidkim205\/Rhea-72b-v0.5"}}
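The file uses pandas' column-oriented layout ({"column": {"row_index": value}}), which is what the pd.read_json call in app.py consumes directly. A small sketch of how it loads; the printed output is approximate:

import pandas as pd

# Default "columns" orient: yields a one-row DataFrame whose columns
# match the leaderboard headers used by select_columns above.
df = pd.read_json("leaderboard_data.json")
print(df[["Arch", "Model", "DOCKQ", "CAPRI"]])
#   Arch                      Model  DOCKQ  CAPRI
# 0  GNN  davidkim205/Rhea-72b-v0.5  77.95   74.5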