---
dataset_info:
  features:
  - name: context
    dtype: audio
  - name: instruction
    dtype: string
  - name: answer
    dtype: string
  splits:
  - name: test
    num_bytes: 142157786.0
    num_examples: 1004
  download_size: 139557753
  dataset_size: 142157786.0
configs:
- config_name: default
  data_files:
  - split: test
    path: data/test-*
---
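
The metadata above describes a single `test` split (1004 examples) with an audio `context`, a string `instruction`, and a string `answer`. Below is a minimal loading sketch using the Hugging Face `datasets` library; the repo ID is a placeholder, since this card does not state it.

```python
# Minimal loading sketch (assumption: "ORG/DATASET_NAME" is a placeholder repo ID).
from datasets import load_dataset

ds = load_dataset("ORG/DATASET_NAME", split="test")  # single "test" split, 1004 examples

sample = ds[0]
audio = sample["context"]        # audio feature: dict with "array", "sampling_rate", "path"
print(sample["instruction"])     # string prompt
print(sample["answer"])          # string reference answer
print(audio["sampling_rate"], len(audio["array"]))
```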
|
|
|
```
@article{busso2008iemocap,
  title={IEMOCAP: Interactive emotional dyadic motion capture database},
  author={Busso, Carlos and Bulut, Murtaza and Lee, Chi-Chun and Kazemzadeh, Abe and Mower, Emily and Kim, Samuel and Chang, Jeannette N and Lee, Sungbok and Narayanan, Shrikanth S},
  journal={Language resources and evaluation},
  volume={42},
  pages={335--359},
  year={2008},
  publisher={Springer}
}
```
|
```
@article{wang2024audiobench,
  title={AudioBench: A Universal Benchmark for Audio Large Language Models},
  author={Wang, Bin and Zou, Xunlong and Lin, Geyu and Sun, Shuo and Liu, Zhuohan and Zhang, Wenyu and Liu, Zhengyuan and Aw, AiTi and Chen, Nancy F},
  journal={NAACL},
  year={2025}
}
```