# splice-benchmark / splice_banchmark.py
# Uploaded by prokajevo via the upload-large-folder tool (commit a1d4687, verified).
import json
import os
import datasets
# Human-readable dataset summary; surfaced through `datasets.DatasetInfo.description`.
_DESCRIPTION = """
SPLICE is a human-curated benchmark designed to evaluate the temporal and causal reasoning
capabilities of Multimodal Large Language Models (MLLMs). The core task is to reorder a set of
shuffled video segments from a single procedural event into their correct chronological sequence.
The dataset is derived from 3,381 instructional videos from the COIN dataset, segmented into
11,423 coherent event clips.
"""
# BibTeX entry for the EMNLP 2025 paper; surfaced through `datasets.DatasetInfo.citation`.
_CITATION = """
@inproceedings{
ballout2025can,
title={{Can you {SPLICE} it together? A Human Curated Benchmark for Probing Visual Reasoning in {VLM}s}},
author={Mohamad Ballout* and Okajevo Wilfred* and Seyedalireza Yaghoubi and Nohayr Muhammad Abdelmoneim and Julius Mayer and Elia Bruni},
booktitle={The 2025 Conference on Empirical Methods in Natural Language Processing},
year={2025},
url={https://openreview.net/forum?id=deFgBHsHxl}
}
"""
class SpliceBenchmark(datasets.GeneratorBasedBuilder):
    """The SPLICE Benchmark Dataset.

    Reads `splice_segment_metadata.json` from the repository root and yields one
    example per source video. Each example carries video-level metadata plus a
    sequence of segment records whose `video_clip` field points at the extracted
    clip file on disk.
    """

    def _info(self):
        """Return the `DatasetInfo` (feature schema, description, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "video_id": datasets.Value("string"),
                    "domain": datasets.Value("string"),
                    "class": datasets.Value("string"),
                    "subset": datasets.Value("string"),
                    "video_url": datasets.Value("string"),
                    "duration": datasets.Value("float"),
                    "segments": datasets.Sequence(
                        {
                            "part": datasets.Value("int32"),
                            "segment_id": datasets.Value("string"),
                            "label": datasets.Value("string"),
                            "start": datasets.Value("float"),
                            "end": datasets.Value("float"),
                            "video_clip": datasets.Video(),
                        }
                    ),
                }
            ),
            homepage="https://huggingface.co/datasets/prokajevo/splice-benchmark",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the repository and expose a single TRAIN split."""
        # "." resolves to the dataset repository root when loaded from the Hub.
        data_dir = dl_manager.download_and_extract(".")
        metadata_path = os.path.join(data_dir, "splice_segment_metadata.json")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"metadata_path": metadata_path, "data_dir": data_dir},
            ),
        ]

    def _generate_examples(self, metadata_path, data_dir):
        """Yield `(key, example)` pairs from the segment metadata file.

        Videos whose metadata is malformed or whose clip files are missing are
        skipped with a warning rather than aborting the whole generation
        (deliberate best-effort behavior for a large media dataset).
        """
        # Explicit encoding: the metadata JSON is UTF-8; relying on the
        # platform default codec breaks on e.g. Windows (cp1252).
        with open(metadata_path, "r", encoding="utf-8") as f:
            data = json.load(f)
        video_count = 0
        for video_info in data:
            try:
                segments_data = []
                for segment in video_info.get("segments") or []:
                    if not segment.get("output_path"):
                        continue
                    video_path = os.path.join(data_dir, segment["output_path"])
                    # Only reference clips that actually exist on disk.
                    if os.path.exists(video_path):
                        segments_data.append({
                            "part": segment.get("part", -1),
                            "segment_id": segment.get("segment_id", ""),
                            "label": segment.get("label", ""),
                            "start": segment.get("start", -1.0),
                            "end": segment.get("end", -1.0),
                            "video_clip": video_path,
                        })
                # Skip videos with no usable segments; the key only advances
                # for yielded examples, keeping keys dense.
                if segments_data:
                    yield video_count, {
                        "video_id": video_info.get("video_id", ""),
                        # NOTE(review): source JSON capitalizes this key as
                        # "Domain" — presumably intentional; verify upstream.
                        "domain": video_info.get("Domain", ""),
                        "class": video_info.get("class", ""),
                        "subset": video_info.get("subset", ""),
                        "video_url": video_info.get("video_url", ""),
                        "duration": video_info.get("duration", -1.0),
                        "segments": segments_data,
                    }
                    video_count += 1
            except Exception as e:
                # Best-effort: log and move on so one corrupt record does not
                # abort generation of the remaining examples.
                print(f"--> WARNING: Skipping corrupted data for video {video_info.get('video_id', 'unknown')}. Error: {e}")
                continue