import json
import os
import pandas as pd
from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results
from src.about import Tasks
def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
    """Create the leaderboard dataframe from all the individual experiment results.

    Args:
        results_path: Directory containing the raw evaluation result files.
        requests_path: Directory containing the evaluation request files.
        cols: Columns (in order) the leaderboard should expose.
        benchmark_cols: Benchmark columns that must all be populated for a row
            to be shown.

    Returns:
        A dataframe restricted to the requested columns that exist, sorted by
        the first task's score (descending) when present — otherwise by model
        name — with rows missing any produced benchmark value filtered out.
    """
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    df = pd.DataFrame.from_records(all_data_json)

    # No results yet: return an empty frame that still has the expected columns.
    if df.empty:
        return pd.DataFrame(columns=cols)

    # Sort by the first task (EMEA NER) since we don't have an average for NER tasks.
    # If no results exist yet, just sort by model name.
    first_task = list(Tasks)[0]  # emea_ner
    task_col_name = first_task.value.col_name  # Use the col_name directly
    if task_col_name in df.columns:
        df = df.sort_values(by=[task_col_name], ascending=False)
    else:
        # Fallback to sorting by model name if no task results yet
        df = df.sort_values(by=["Model"], ascending=True)

    # Only select columns that exist in the DataFrame
    available_cols = [col for col in cols if col in df.columns]
    df = df[available_cols].round(decimals=2)

    # Filter out rows where any benchmark has not been produced. Restrict the
    # check to columns actually present, mirroring the `available_cols` guard
    # above, so a partially populated results set cannot raise a KeyError.
    available_benchmark_cols = [col for col in benchmark_cols if col in df.columns]
    df = df[has_no_nan_values(df, available_benchmark_cols)]
    return df
def _load_eval_request(file_path: str):
    """Read one eval-request JSON file and normalise its display fields.

    Returns the parsed dict with the model rendered as a clickable link and a
    default revision of "main", or None when the file is unreadable or lacks
    the required "model" key (the error is logged and the file skipped).
    """
    try:
        with open(file_path) as fp:
            data = json.load(fp)
        data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
        data[EvalQueueColumn.revision.name] = data.get("revision", "main")
        return data
    except (json.JSONDecodeError, KeyError, IOError) as e:
        print(f"Error processing {file_path}: {e}")
        return None


def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
    """Creates the different dataframes for the evaluation queue requests.

    Scans `save_path` for request JSON files (both at the top level and one
    folder deep) and buckets them by status.

    Args:
        save_path: Root directory of the evaluation request files.
        cols: Columns each resulting dataframe should expose.

    Returns:
        Three dataframes — finished, running, pending — in that order.
    """
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            data = _load_eval_request(os.path.join(save_path, entry))
            if data is not None:
                all_evals.append(data)
        elif ".md" not in entry:
            # this is a folder of request files
            folder = os.path.join(save_path, entry)
            sub_entries = [
                e for e in os.listdir(folder)
                if os.path.isfile(os.path.join(folder, e)) and not e.startswith(".")
            ]
            for sub_entry in sub_entries:
                data = _load_eval_request(os.path.join(folder, sub_entry))
                if data is not None:
                    all_evals.append(data)

    # Use .get so one request file without a "status" field is merely left out
    # of every bucket instead of crashing the whole queue rendering.
    pending_list = [e for e in all_evals if e.get("status") in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e.get("status") == "RUNNING"]
    finished_list = [
        e for e in all_evals
        if e.get("status", "").startswith("FINISHED") or e.get("status") == "PENDING_NEW_EVAL"
    ]

    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    return df_finished[cols], df_running[cols], df_pending[cols]