|
import gradio as gr |
|
|
|
def render_eval_info(): |
|
text = r""" |
|
|
|
### How to compute your own EER score file?
|
|
|
To streamline evaluation across many models and datasets, we have developed the df_arena_toolkit, which computes score files for evaluation.
The toolkit is available at https://github.com/Speech-Arena/speech_df_arena.
|
|
|
### Usage |
|
#### 1. Data Preparation |
|
Create a metadata.csv for your dataset in the following format:
|
|
|
```
file_name,label
/path/to/audio1,spoof
/path/to/audio2,bonafide
...
```
|
NOTE: Labels must be "spoof" for spoofed samples and "bonafide" for real samples, and all file_name paths should be absolute.
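
As an illustration, here is a minimal sketch of generating such a metadata.csv with Python. The directory layout (separate spoof and bonafide folders) and the .wav extension are assumptions for this example, not requirements of the toolkit:

```py
import csv
from pathlib import Path

# Hypothetical dataset layout: one folder per class; adjust to your data.
SPLITS = {
    "spoof": Path("/data/my_dataset/spoof"),
    "bonafide": Path("/data/my_dataset/bonafide"),
}

with open("metadata.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["file_name", "label"])
    for label, root in SPLITS.items():
        for wav in sorted(root.rglob("*.wav")):
            # resolve() ensures the stored path is absolute.
            writer.writerow([str(wav.resolve()), label])
```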
|
|
|
#### 2. Evaluation |
|
|
|
Example usage:
|
```bash
python evaluation.py --model_name wavlm_ecapa \
                     --batch_size 32 \
                     --protocol_file_path /path/to/metadata.csv \
                     --model_path /path/to/model.ckpt \
                     --out_score_file_name scores.txt \
                     --trim pad \
                     --num_workers 4
```
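
Once the toolkit has written scores.txt, the leaderboard metric is the equal error rate (EER). As a reference for how EER is obtained from detection scores, here is a minimal, self-contained sketch; it assumes you have already parsed the score file into a score array and a label array (1 for bonafide, 0 for spoof) and that higher scores mean more likely bonafide, which may differ from the toolkit's exact conventions:

```py
import numpy as np

def compute_eer(scores: np.ndarray, labels: np.ndarray) -> float:
    # labels: 1 = bonafide, 0 = spoof; higher score = more likely bonafide.
    order = np.argsort(scores)
    scores, labels = scores[order], labels[order]
    n_bona = labels.sum()
    n_spoof = len(labels) - n_bona
    # Sweeping the threshold upward through the sorted scores:
    # FRR = fraction of bonafide rejected so far,
    # FAR = fraction of spoof still accepted above the threshold.
    frr = np.cumsum(labels) / n_bona
    far = 1.0 - np.cumsum(1 - labels) / n_spoof
    idx = np.argmin(np.abs(far - frr))
    return float((far[idx] + frr[idx]) / 2)

# Example: eer = compute_eer(scores, labels); print(round(eer * 100, 2), "% EER")
```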
|
|
|
""" |
|
return gr.Markdown(text, latex_delimiters=[{ "left": "$", "right": "$", "display": True }]) |
|
|