import gradio as gr
import pandas as pd
import os
import sys
import traceback
import logging

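# Presumably a workaround for proxy / self-signed certificate problems: clearing
# CURL_CA_BUNDLE effectively disables custom CA bundle handling for requests-based clients.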
os.environ['CURL_CA_BUNDLE'] = ''

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

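# Silence noisy third-party loggers so the application's own logs stay readable.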
logging.getLogger("httpx").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("matplotlib").setLevel(logging.WARNING)
logging.getLogger("huggingface_hub").setLevel(logging.ERROR)

from gradio.oauth import OAuthProfile

from src.display.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)

from src.display.css_html_js import custom_css

from src.utils import (
    restart_space,
    load_benchmark_results,
    create_benchmark_plots,
    create_combined_leaderboard_table,
    create_evalmix_table,
    create_light_eval_table,
    create_raw_details_table,
    create_human_arena_table,
    update_supported_base_models
)

from pipelines.utils.common import search_and_filter
from pipelines.unified_benchmark import submit_unified_benchmark

EVAL_TYPES = ["EvalMix", "RAG-Judge", "Light-Eval", "Arena", "Snake-Bench"]

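# OAuth and session settings are read from environment variables
# (typically provided as deployment secrets).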
OAUTH_CLIENT_ID = os.getenv("OAUTH_CLIENT_ID")
OAUTH_CLIENT_SECRET = os.getenv("OAUTH_CLIENT_SECRET")
OAUTH_SCOPES = os.getenv("OAUTH_SCOPES", "email")
OPENID_PROVIDER_URL = os.getenv("OPENID_PROVIDER_URL")
SESSION_TIMEOUT_MINUTES = int(os.getenv("HF_OAUTH_EXPIRATION_MINUTES", 30))

def format_dataframe(df, is_light_eval_detail=False):
    """
    Round float values to 2 decimal places, drop the 'file' column,
    and convert column names to a display-friendly format.

    Args:
        df: DataFrame to format
        is_light_eval_detail: If True, use 4 decimal places for light eval detail results
    """
    if df.empty:
        return df

    if 'file' in df.columns:
        df = df.drop(columns=['file'])

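    # Drop internal bookkeeping columns that should not be shown in the UI.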
    columns_to_remove = ["run_id", "user_id", "total_success_references", "Total Success References", "total_eval_samples",
                         "total_samples", "samples_number"]
    for col in columns_to_remove:
        if col in df.columns:
            df = df.drop(columns=[col])

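    # Round float columns: 4 decimals for Light Eval detail tables, 2 elsewhere.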
    decimal_places = 4 if is_light_eval_detail else 2
    for column in df.columns:
        try:
            if pd.api.types.is_float_dtype(df[column]):
                df[column] = df[column].round(decimal_places)
        except Exception:
            continue

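    # Map raw column names to display-friendly headers.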
    column_mapping = {}
    for col in df.columns:
        if col.lower() in ["run_id", "user_id"]:
            continue

        if "turkish_semantic" in col.lower():
            column_mapping[col] = "Turkish Semantic"
            continue

        if "multilingual_semantic" in col.lower():
            column_mapping[col] = "Multilingual Semantic"
            continue

        if col == "Model Name" or " " in col:
            if " mean" in col.lower():
                cleaned_col = col.replace(" mean", "").replace(" Mean", "")
                column_mapping[col] = cleaned_col
            continue

        if col == "model_name":
            column_mapping[col] = "Model Name"
            continue

        cleaned_col = col.replace(" mean", "").replace("_mean", "")
        formatted_col = " ".join([word.capitalize() for word in cleaned_col.replace("_", " ").split()])
        column_mapping[col] = formatted_col

    if column_mapping:
        df = df.rename(columns=column_mapping)

    return df

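# Returns (is_logged_in, message) for a Gradio OAuth profile, which may be
# None, a plain username string, or an OAuthProfile object.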
def check_user_login(profile):
    if profile is None:
        return False, "Please log in with your Hugging Face account to submit models for benchmarking."

    if isinstance(profile, str):
        if profile == "":
            return False, "Please log in with your Hugging Face account to submit models for benchmarking."
        return True, f"Logged in as {profile}"

    return True, f"Logged in as {profile.username}"

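# Build the full Gradio Blocks application: leaderboard tabs, dataset documentation,
# and the authenticated model submission form.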
def create_demo():
    logger = logging.getLogger("mezura")

    with gr.Blocks(css=custom_css) as demo:
        logger.info("Updating supported base models at startup...")
        update_supported_base_models()
        logger.info("Base models updated successfully")

        gr.Markdown(TITLE)
        gr.Markdown(INTRODUCTION_TEXT)

        session_expiry = gr.State(None)

        try:
            benchmark_results = load_benchmark_results()
            default_plots = create_benchmark_plots(benchmark_results, "avg")

            login_state = gr.State(value=False)

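            # Top-level tabs: benchmark results, About, Datasets, and Evaluation (submission).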
            with gr.Tabs() as tabs:
                with gr.TabItem("🏆 LLM Benchmark", elem_id="llm-benchmark-tab"):
                    gr.Markdown("## Model Evaluation Results")
                    gr.Markdown("This screen shows model performance across different evaluation categories.")

                    with gr.Row():
                        search_input = gr.Textbox(
                            label="🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
                            placeholder="Enter model name or evaluation information...",
                            show_label=False
                        )

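                    # One sub-tab per benchmark; each shows a formatted DataFrame built from the loaded results.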
                    with gr.Tabs() as benchmark_tabs:
                        with gr.TabItem("🏆 Leaderboard"):
                            combined_df = create_combined_leaderboard_table(benchmark_results)
                            combined_df = format_dataframe(combined_df)

                            if not combined_df.empty:
                                leaderboard_df = combined_df.copy()
                            else:
                                leaderboard_df = pd.DataFrame({"Model Name": ["No data available"]})

                            original_leaderboard_data = gr.State(value=leaderboard_df)

                            combined_table = gr.DataFrame(
                                value=leaderboard_df,
                                label="Model Performance Comparison",
                                interactive=False,
                                column_widths=["300px", "165px", "165px", "120px", "120px", "180px", "220px", "100px", "100px", "120px"]
                            )

                        with gr.TabItem("🏟️ Auto Arena"):
                            arena_details_df = create_raw_details_table(benchmark_results, "arena")
                            arena_details_df = format_dataframe(arena_details_df)

                            if arena_details_df.empty:
                                arena_details_df = pd.DataFrame({"model_name": ["No data available"]})

                            arena_table = gr.DataFrame(
                                value=arena_details_df,
                                label="Arena Detailed Results",
                                interactive=False,
                                column_widths=["300px", "150px", "110px", "110px", "180px", "100px", "120px"]
                            )

                        with gr.TabItem("👥 Human Arena"):
                            human_arena_data = benchmark_results["raw"]["human_arena"]
                            if human_arena_data:
                                human_arena_df = create_human_arena_table(human_arena_data)
                            else:
                                human_arena_df = pd.DataFrame()

                            human_arena_df = format_dataframe(human_arena_df)

                            if human_arena_df.empty:
                                human_arena_df = pd.DataFrame({"Model Name": ["No data available"]})

                            human_arena_table = gr.DataFrame(
                                value=human_arena_df,
                                label="Human Arena Results",
                                interactive=False,
                                column_widths=["300px", "150px", "110px", "110px", "110px", "156px", "169px", "100px", "120px"]
                            )

                        with gr.TabItem("📚 Retrieval"):
                            rag_details_df = create_raw_details_table(benchmark_results, "retrieval")
                            rag_details_df = format_dataframe(rag_details_df)

                            if rag_details_df.empty:
                                rag_details_df = pd.DataFrame({"model_name": ["No data available"]})

                            rag_table = gr.DataFrame(
                                value=rag_details_df,
                                label="Retrieval Detailed Results",
                                interactive=False,
                                column_widths=["280px", "120px", "140px", "140px", "140px", "120px", "160px", "100px", "120px"]
                            )

                        with gr.TabItem("⚡ Light Eval"):
                            light_details_data = benchmark_results["raw"]["light_eval"]
                            if light_details_data:
                                light_details_df = create_light_eval_table(light_details_data, is_detail=True)
                            else:
                                light_details_df = pd.DataFrame()

                            light_details_df = format_dataframe(light_details_df, is_light_eval_detail=True)

                            if light_details_df.empty:
                                light_details_df = pd.DataFrame({"model_name": ["No data available"]})

                            light_table = gr.DataFrame(
                                value=light_details_df,
                                label="Light Eval Detailed Results",
                                interactive=False,
                                column_widths=["300px", "110px", "110px", "143px", "130px", "130px", "110px", "110px", "100px", "120px"]
                            )

                        with gr.TabItem("📋 EvalMix"):
                            hybrid_details_df = create_raw_details_table(benchmark_results, "evalmix")
                            hybrid_details_df = format_dataframe(hybrid_details_df)

                            if hybrid_details_df.empty:
                                hybrid_details_df = pd.DataFrame({"model_name": ["No data available"]})

                            hybrid_table = gr.DataFrame(
                                value=hybrid_details_df,
                                label="EvalMix Detailed Results",
                                interactive=False,
                                column_widths=["300px", "180px", "230px", "143px", "110px", "110px", "110px", "110px", "169px", "220px", "100px", "120px"]
                            )

                        with gr.TabItem("🐍 𝐒𝐧𝐚𝐤𝐞 𝐁𝐞𝐧𝐜𝐡"):
                            snake_details_df = create_raw_details_table(benchmark_results, "snake")
                            snake_details_df = format_dataframe(snake_details_df)

                            if snake_details_df.empty:
                                snake_details_df = pd.DataFrame({"model_name": ["No data available"]})

                            snake_table = gr.DataFrame(
                                value=snake_details_df,
                                label="Snake Benchmark Detailed Results",
                                interactive=False,
                                column_widths=["300px", "130px", "110px", "117px", "110px", "110px", "110px", "117px", "100px", "120px"]
                            )

                    def search_all_tabs(query, original_data):
                        """
                        Search across all result tabs.
                        """
                        if not query or query.strip() == "":
                            return (original_data, arena_details_df, human_arena_df,
                                    rag_details_df, light_details_df, hybrid_details_df, snake_details_df)

                        return (
                            search_and_filter(query, original_data, "All"),
                            search_and_filter(query, arena_details_df, "All"),
                            search_and_filter(query, human_arena_df, "All"),
                            search_and_filter(query, rag_details_df, "All"),
                            search_and_filter(query, light_details_df, "All"),
                            search_and_filter(query, hybrid_details_df, "All"),
                            search_and_filter(query, snake_details_df, "All")
                        )

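                    # Wire the search box to every results table; an empty query restores the original data.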
                    search_input.change(
                        search_all_tabs,
                        inputs=[search_input, original_leaderboard_data],
                        outputs=[combined_table, arena_table, human_arena_table, rag_table, light_table, hybrid_table, snake_table]
                    )

                with gr.TabItem("ℹ️ About", elem_id="about-tab"):
                    gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

                with gr.TabItem("📊 Datasets", elem_id="datasets-tab"):
                    gr.Markdown("## Benchmark Datasets")
                    gr.Markdown("""
                    This section provides detailed information about the datasets used in our evaluation benchmarks.
                    Each dataset has been carefully selected and adapted to provide comprehensive model evaluation across different domains and capabilities.
                    """)

                    datasets_html = """
                    <div style="margin-top: 20px;">
                        <h3>Available Datasets for Evaluation</h3>
                        <table style="width: 100%; border-collapse: collapse; margin-top: 10px;">
                            <thead>
                                <tr style="background-color: var(--background-fill-secondary);">
                                    <th style="padding: 12px; text-align: left; border-bottom: 2px solid var(--border-color-primary); width: 20%;">Dataset</th>
                                    <th style="padding: 12px; text-align: left; border-bottom: 2px solid var(--border-color-primary); width: 18%;">Evaluation Task</th>
                                    <th style="padding: 12px; text-align: left; border-bottom: 2px solid var(--border-color-primary); width: 10%;">Language</th>
                                    <th style="padding: 12px; text-align: left; border-bottom: 2px solid var(--border-color-primary); width: 52%;">Description</th>
                                </tr>
                            </thead>
                            <tbody>
                                <tr>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 20%;"><a href="https://huggingface.co/datasets/malhajar/mmlu_tr-v0.2" target="_blank" style="color: #0066cc; text-decoration: none;">malhajar/mmlu_tr-v0.2</a></td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 18%;">Lighteval MMLU</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 10%;">Turkish</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 52%;">Turkish adaptation of MMLU (Massive Multitask Language Understanding) v0.2 covering 57 academic subjects including mathematics, physics, chemistry, biology, history, law, and computer science. Tests knowledge and reasoning capabilities across multiple domains with multiple-choice questions.</td>
                                </tr>
                                <tr>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 20%;"><a href="https://huggingface.co/datasets/malhajar/truthful_qa-tr-v0.2" target="_blank" style="color: #0066cc; text-decoration: none;">malhajar/truthful_qa-tr-v0.2</a></td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 18%;">Lighteval TruthfulQA</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 10%;">Turkish</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 52%;">Turkish version of TruthfulQA (v0.2) designed to measure model truthfulness and resistance to generating false information. Contains questions where humans often answer incorrectly due to misconceptions or false beliefs, testing the model's ability to provide accurate information.</td>
                                </tr>
                                <tr>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 20%;"><a href="https://huggingface.co/datasets/malhajar/winogrande-tr-v0.2" target="_blank" style="color: #0066cc; text-decoration: none;">malhajar/winogrande-tr-v0.2</a></td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 18%;">Lighteval WinoGrande</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 10%;">Turkish</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 52%;">Turkish adaptation of WinoGrande (v0.2) focusing on commonsense reasoning through pronoun resolution tasks. Tests the model's ability to understand context, make logical inferences, and resolve ambiguous pronouns in everyday scenarios.</td>
                                </tr>
                                <tr>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 20%;"><a href="https://huggingface.co/datasets/malhajar/hellaswag_tr-v0.2" target="_blank" style="color: #0066cc; text-decoration: none;">malhajar/hellaswag_tr-v0.2</a></td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 18%;">Lighteval HellaSwag</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 10%;">Turkish</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 52%;">Turkish version of HellaSwag (v0.2) for commonsense reasoning evaluation. Tests the model's ability to predict plausible continuations of everyday scenarios and activities, requiring understanding of common sense and typical human behavior patterns.</td>
                                </tr>
                                <tr>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 20%;"><a href="https://huggingface.co/datasets/malhajar/arc-tr-v0.2" target="_blank" style="color: #0066cc; text-decoration: none;">malhajar/arc-tr-v0.2</a></td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 18%;">Lighteval ARC</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 10%;">Turkish</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 52%;">Turkish adaptation of ARC (AI2 Reasoning Challenge) v0.2 focusing on science reasoning and question answering. Contains grade school level science questions that require reasoning beyond simple factual recall, covering topics in physics, chemistry, biology, and earth science.</td>
                                </tr>
                                <tr>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 20%;"><a href="https://huggingface.co/datasets/malhajar/gsm8k_tr-v0.2" target="_blank" style="color: #0066cc; text-decoration: none;">malhajar/gsm8k_tr-v0.2</a></td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 18%;">Lighteval GSM8K</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 10%;">Turkish</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 52%;">Turkish version of GSM8K (Grade School Math 8K) v0.2 for mathematical reasoning evaluation. Contains grade school level math word problems that require multi-step reasoning, arithmetic operations, and logical problem-solving skills to arrive at the correct numerical answer.</td>
                                </tr>
                                <tr>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 20%;"><a href="https://huggingface.co/datasets/newmindai/mezura-eval-data" target="_blank" style="color: #0066cc; text-decoration: none;">newmindai/mezura-eval-data</a></td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 18%;">Auto-Arena</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 10%;">Turkish</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 52%;">The mezura-eval dataset is a Turkish-language legal text dataset designed for evaluation tasks with RAG context support. The subsets cover domains such as Environmental Law, Tax Law, Data Protection Law, and Health Law, each containing annotated samples. Every row includes structured fields such as the category, concept, input, and contextual information drawn from sources like official decisions.</td>
                                </tr>
                                <tr>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 20%;"><a href="https://huggingface.co/datasets/newmindai/mezura-eval-data" target="_blank" style="color: #0066cc; text-decoration: none;">newmindai/mezura-eval-data</a></td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 18%;">EvalMix</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 10%;">Turkish</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 52%;">The mezura-eval dataset is a Turkish-language legal text dataset designed for evaluation tasks with RAG context support. The subsets cover domains such as Environmental Law, Tax Law, Data Protection Law, and Health Law, each containing annotated samples. Every row includes structured fields such as the category, concept, input, and contextual information drawn from sources like official decisions.</td>
                                </tr>
                                <tr>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 20%;"><a href="https://huggingface.co/datasets/newmindai/mezura-eval-data" target="_blank" style="color: #0066cc; text-decoration: none;">newmindai/mezura-eval-data</a></td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 18%;">Retrieval</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 10%;">Turkish</td>
                                    <td style="padding: 10px; border-bottom: 1px solid var(--border-color-primary); width: 52%;">The mezura-eval dataset is a Turkish-language legal text dataset designed for evaluation tasks with RAG context support. The subsets cover domains such as Environmental Law, Tax Law, Data Protection Law, and Health Law, each containing annotated samples. Every row includes structured fields such as the category, concept, input, and contextual information drawn from sources like official decisions.</td>
                                </tr>
                            </tbody>
                        </table>
                    </div>
                    """
                    gr.HTML(datasets_html)

                with gr.TabItem("🔬 Evaluation", elem_id="evaluation-tab"):
                    gr.Markdown("""
                    <h2 align="center">Model Evaluation</h2>

                    ### Evaluation Process:

                    1. **Login to Your Hugging Face Account**
                       - You must be logged in to submit models for evaluation

                    2. **Enter Model Name**
                       - Input the HuggingFace model name or path you want to evaluate
                       - Example: meta-llama/Meta-Llama-3.1-70B-Instruct

                    3. **Select Base Model**
                       - Choose the base model from the dropdown list
                       - The system will verify if your repository is a valid HuggingFace repository
                       - It will check if the model is trained from the selected base model

                    4. **Start Evaluation**
                       - Click the "Start All Benchmarks" button to begin the evaluation
                       - If validation passes, your request will be processed
                       - If validation fails, you'll see an error message

                    ### Important Limitations:
                    - The model repository must be a maximum of 750 MB in size.
                    - For trained adapters, the maximum LoRA rank must be 32.
                    """)

                    auth_container = gr.Group()
                    with auth_container:
                        login_button = gr.LoginButton()

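                    # Base models come from the API config; fall back to a static list if none are configured.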
                    from api.config import get_base_model_list
                    BASE_MODELS = get_base_model_list()

                    if not BASE_MODELS:
                        BASE_MODELS = [
                            "meta-llama/Meta-Llama-3.1-8B-Instruct",
                            "meta-llama/Llama-3.2-3B-Instruct",
                            "meta-llama/Llama-3.3-70B-Instruct",
                            "Qwen/Qwen2.5-72B-Instruct",
                            "Qwen/QwQ-32B",
                            "google/gemma-2-2b-it"
                        ]

                    login_dependent_content = gr.Group(visible=False)
                    with login_dependent_content:
                        gr.Markdown("### Model Submission")

                        model_to_evaluate = gr.Textbox(
                            label="Adapter Repo ID",
                            placeholder="e.g., valadapt/llama-3-8b-turkish"
                        )

                        gr.Markdown("""
                        **Note:** Currently, only adapter models are supported. Merged models are not yet supported.
                        """, elem_classes=["info-text"])

                        base_model_dropdown = gr.Dropdown(
                            choices=BASE_MODELS,
                            label="Base Model",
                            allow_custom_value=True
                        )

                        reasoning_checkbox = gr.Checkbox(
                            label="Reasoning",
                            value=False,
                            info="Enable reasoning capability during evaluation"
                        )

                        email_input = gr.Textbox(
                            label="Email Address",
                            placeholder="example@domain.com",
                            info="You'll receive a notification when the benchmark is complete"
                        )

                        submit_button = gr.Button("Start All Benchmarks", variant="primary")

                        result_output = gr.Markdown("")

                    auth_error = gr.Markdown(visible=False)

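                    # Show the submission form only when a Hugging Face profile is present;
                    # otherwise surface an authentication prompt instead.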
                    def toggle_form_visibility(profile):
                        if profile is None:
                            return (
                                gr.update(visible=False),
                                gr.update(
                                    visible=True,
                                    value="<p style='color: red; text-align: center; font-weight: bold;'>Authentication required. Please log in with your Hugging Face account to submit models.</p>"
                                )
                            )

                        try:
                            if hasattr(profile, 'name'):
                                username = profile.name
                            elif hasattr(profile, 'username'):
                                username = profile.username
                            else:
                                username = str(profile)

                            logger.info(f"User authenticated: {username}")
                        except Exception as e:
                            logger.info(f"LOGIN - Error inspecting profile: {str(e)}")

                        return (
                            gr.update(visible=True),
                            gr.update(visible=False, value="")
                        )

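                    # Re-evaluate form visibility when the login button is used and again on initial page load.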
                    login_button.click(
                        fn=toggle_form_visibility,
                        inputs=[login_button],
                        outputs=[login_dependent_content, auth_error]
                    )

                    demo.load(
                        fn=toggle_form_visibility,
                        inputs=[login_button],
                        outputs=[login_dependent_content, auth_error]
                    )

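                    # Validate the submission (authentication, email, model type) before dispatching the benchmark run.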
                    def submit_model(model, base_model, reasoning, email, profile):
                        if profile is None:
                            logging.warning("Unauthorized submission attempt with no profile")
                            return "<p style='color: red; font-weight: bold;'>Authentication required. Please log in with your Hugging Face account.</p>"

                        if isinstance(profile, str) and profile == "Sign in with Hugging Face":
                            return "<p style='color: orange; font-weight: bold;'>⚠️ HF authentication required.</p>"

                        if not email or email.strip() == "":
                            return "<p style='color: red; font-weight: bold;'>Email address is required to receive benchmark results.</p>"

                        try:
                            from src.submission.check_validity import determine_model_type
                            model_type, _ = determine_model_type(model)
                            if model_type == "merged_model" or model_type == "merge":
                                return "<p style='color: red; font-weight: bold;'>Merged models are not supported yet. Please submit an adapter model instead.</p>"
                        except Exception as e:
                            logging.warning(f"Error checking model type: {str(e)}")

                        result_message, _ = submit_unified_benchmark(model, base_model, reasoning, email, profile)
                        logging.info(f"Submission processed for model: {model}")
                        return result_message

                    submit_button.click(
                        fn=submit_model,
                        inputs=[
                            model_to_evaluate,
                            base_model_dropdown,
                            reasoning_checkbox,
                            email_input,
                            login_button
                        ],
                        outputs=[result_output]
                    )

        except Exception as e:
            traceback.print_exc()
            gr.Markdown("## Error: An issue occurred while loading the LLM Benchmark screen")
            gr.Markdown(f"Error message: {str(e)}")
            gr.Markdown("Please check your configuration and try again.")

        gr.Markdown("---")
        with gr.Accordion(CITATION_BUTTON_LABEL, open=False):
            gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                lines=10,
                show_copy_button=True,
                label=None
            )

    return demo

if __name__ == "__main__":
    logger = logging.getLogger("mezura")

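    # Filter that drops log records which may contain tokens, signed URLs, or raw request details.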
    class SensitiveFilter(logging.Filter):
        def filter(self, record):
            msg = record.getMessage().lower()
            sensitive_patterns = ["token", "__sign=", "request", "auth", "http request"]
            return not any(pattern in msg for pattern in sensitive_patterns)

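    # Attach the filter to every logger that has been created so far.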
    for logger_name in logging.root.manager.loggerDict:
        logging.getLogger(logger_name).addFilter(SensitiveFilter())

    try:
        logger.info("Creating demo...")
        demo = create_demo()
        logger.info("Launching demo on 0.0.0.0...")

        demo.launch(
            server_name="0.0.0.0",
            server_port=7860
        )

    except FileNotFoundError as e:
        logger.critical(f"Configuration file not found: {e}")
        print(f"\n\nERROR: Configuration file not found. Please ensure config/api_config.yaml exists.\n{e}\n")
        sys.exit(1)
    except ValueError as e:
        logger.critical(f"Configuration error: {e}")
        print(f"\n\nERROR: Invalid configuration. Please check your config/api_config.yaml file.\n{e}\n")
        sys.exit(1)
    except Exception as e:
        logger.critical(f"Could not launch demo: {e}", exc_info=True)
|