language-metric-data / language-metric-data.py
import os
import pickle
import numpy as np
import datasets
# _CITATION = """\
# @inproceedings{your_citation,
# title={Your Dataset Title},
# author={Your Name},
# year={2025},
# booktitle={Conference or Journal Name}
# }
# """
# _DESCRIPTION = """\
# This dataset contains the entire content of three files loaded as a single example:
# - `languages_list.pkl`: A pickled list of language strings.
# - `average_distances_matrix.npy`: A NumPy matrix converted to a list of lists of floats.
# - `distances_matrices.pkl`: A pickled dict of dicts of NumPy matrices.
# It is converted into a list of records where each record corresponds to a dataset with a nested list of models and their associated distance matrices.
# """
# _LICENSE = "Specify your license here."
class DistancesDataset(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        # Define features for the whole dataset as a single example.
        features = datasets.Features({
            "languages_list": datasets.Sequence(datasets.Value("string")),
            "average_distances_matrix": datasets.Sequence(
                datasets.Sequence(datasets.Value("float32"))
            ),
            "distances_matrices": datasets.Sequence(
                datasets.Features({
                    "dataset_name": datasets.Value("string"),
                    "models": datasets.Sequence(
                        datasets.Features({
                            "model_name": datasets.Value("string"),
                            "matrix": datasets.Sequence(
                                datasets.Sequence(datasets.Value("float32"))
                            )
                        })
                    )
                })
            )
        })
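
        # Illustrative shape of the single example this builder yields
        # (the concrete language, dataset, and model names are hypothetical):
        #   {
        #       "languages_list": ["en", "de", ...],
        #       "average_distances_matrix": [[0.0, 0.12, ...], ...],
        #       "distances_matrices": [
        #           {"dataset_name": "some_dataset",
        #            "models": [{"model_name": "some_model",
        #                        "matrix": [[0.0, 0.34, ...], ...]}]}
        #       ]
        #   }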
        return datasets.DatasetInfo(
            # description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="",
            # license=_LICENSE,
            # citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The three data files are expected at the root of the dataset repository;
        # dl_manager resolves the relative paths below against the repository base.
        # data_dir = self.config.data_dir if hasattr(self.config, "data_dir") else ""
        downloaded_files = dl_manager.download_and_extract([
            "languages_list.pkl",
            "average_distances_matrix.npy",
            "distances_matrices.pkl",
        ])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "languages_list_path": downloaded_files[0],
                    "avg_matrix_path": downloaded_files[1],
                    "distances_matrices_path": downloaded_files[2],
                },
            )
        ]

    def _generate_examples(self, languages_list_path, avg_matrix_path, distances_matrices_path):
        # Load languages_list.pkl (a pickled list of language strings).
        with open(languages_list_path, "rb") as f:
            languages_list = pickle.load(f)

        # Load average_distances_matrix.npy and convert the NumPy matrix to a
        # list of lists so that it matches the declared feature schema.
        average_distances_matrix = np.load(avg_matrix_path, allow_pickle=True)
        average_distances_matrix = average_distances_matrix.tolist()

        # Load distances_matrices.pkl (a pickled dict of dicts of NumPy matrices).
        with open(distances_matrices_path, "rb") as f:
            distances_matrices = pickle.load(f)
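
        # Expected layout of the unpickled object (illustrative only, inferred
        # from the feature schema; the key names are hypothetical):
        #   {
        #       "dataset_a": {"model_x": <L x L np.ndarray>, "model_y": ...},
        #       "dataset_b": {...},
        #   }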
        # Convert the nested dict structure into a list of records:
        # each outer key is a dataset name, and each inner dict maps a
        # model name to a NumPy distance matrix.
        distances_matrices_list = []
        for dataset_name, models_dict in distances_matrices.items():
            models_list = []
            for model_name, matrix in models_dict.items():
                models_list.append({
                    "model_name": model_name,
                    # Convert the NumPy matrix to a plain list of lists of floats.
                    "matrix": np.asarray(matrix).tolist()
                })
            distances_matrices_list.append({
                "dataset_name": dataset_name,
                "models": models_list
            })
        # Yield a single example containing the whole dataset.
        yield 0, {
            "languages_list": languages_list,
            "average_distances_matrix": average_distances_matrix,
            "distances_matrices": distances_matrices_list
        }
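
# Minimal usage sketch (not part of the builder): one way this dataset could be
# loaded once the three data files sit next to this script in the repository.
# The repository id below is an assumption based on the file path shown above.
#
#   import datasets
#   ds = datasets.load_dataset("mshamrai/language-metric-data", split="train")
#   example = ds[0]
#   print(example["languages_list"][:5])
#   print(len(example["average_distances_matrix"]))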