README — Loading things_eeg_2 from nonarjb/alignvis
This repo hosts WebDataset shard sets under things_eeg_2/:
- things_eeg_2-images-*.tar — images
- things_eeg_2-image_embeddings-*.tar — vector embeddings (.npy/.npz)
- things_eeg_2-preprocessed_eeg-*.tar — EEG arrays (.npy/.npz)
Inside each shard, the WebDataset __key__ is the file’s relative path under the top folder (without extension). To reconstruct the original relative path, use:
rel_path = "<top>/" + __key__ + "." + <ext>
(e.g., images/training_images/01133_raincoat/raincoat_01s.jpg)
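A minimal sketch of that reconstruction from a streamed sample (the extension list below only covers the formats used in this repo; adjust it if yours differ):

# Hedged sketch: rebuild the original relative path from a WebDataset sample.
def rel_path_from_sample(sample, top):
    # pick whichever payload extension this sample actually carries
    ext = next(e for e in ("jpg", "jpeg", "png", "npy", "npz") if e in sample)
    return f"{top}/{sample['__key__']}.{ext}"

# e.g. rel_path_from_sample(sample, top="images")
# -> "images/training_images/01133_raincoat/raincoat_01s.jpg"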
To use the other dataset (things_meg), just replace dataset_dir="things_eeg_2" with dataset_dir="things_meg" in the examples below.
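For instance, with make_images_loader from section A below (assuming things_meg follows the same shard layout):

loader = make_images_loader(dataset_dir="things_meg")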
Install
pip install webdataset huggingface_hub pillow torch tqdm
# Optional: faster transfers for big files
pip install -U hf_transfer && export HF_HUB_ENABLE_HF_TRANSFER=1
Helper: list shard URLs from the Hub
Create utils_hf_wds.py:
# utils_hf_wds.py
from huggingface_hub import HfFileSystem, hf_hub_url
def hf_tar_urls(repo_id: str, dataset_dir: str, top: str, revision: str = "main"):
"""
Return sorted 'resolve/<revision>' URLs for shards matching:
<dataset_dir>/<dataset_dir>-<top>-*.tar
Example: things_eeg_2/things_eeg_2-images-000000.tar
"""
fs = HfFileSystem()
pattern = f"datasets/{repo_id}/{dataset_dir}/{dataset_dir}-{top}-*.tar"
hf_paths = sorted(fs.glob(pattern)) # hf://datasets/<repo_id>/...
rel_paths = [p.split(f"datasets/{repo_id}/", 1)[1] for p in hf_paths]
return [
hf_hub_url(repo_id, filename=p, repo_type="dataset", revision=revision)
for p in rel_paths
]
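A quick way to sanity-check the helper before wiring up a loader (the printed URL shape is what hf_hub_url produces for dataset repos):

# Example: list the image shards for things_eeg_2.
from utils_hf_wds import hf_tar_urls

urls = hf_tar_urls("nonarjb/alignvis", dataset_dir="things_eeg_2", top="images")
print(len(urls), urls[0] if urls else "no shards found")
# e.g. https://huggingface.co/datasets/nonarjb/alignvis/resolve/main/things_eeg_2/things_eeg_2-images-000000.tar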
A) Images (PIL) with original relative paths
import io
from PIL import Image
import torch, webdataset as wds
from utils_hf_wds import hf_tar_urls
REPO = "nonarjb/alignvis"
def make_images_loader(dataset_dir="things_eeg_2", batch_size=16, num_workers=4):
urls = hf_tar_urls(REPO, dataset_dir, top="images")
if not urls: raise RuntimeError("No image shards found")
def pick_image(s):
for ext in ("jpg","jpeg","png"):
if ext in s:
s["img_bytes"] = s[ext]
s["rel_path"] = f"images/{s['__key__']}.{ext}"
return s
return None
ds = (wds.WebDataset(urls, shardshuffle=False, handler=wds.handlers.warn_and_continue)
.map(pick_image).select(lambda s: s is not None)
.map(lambda s: (s["rel_path"], Image.open(io.BytesIO(s["img_bytes"])).convert("RGB"))))
return torch.utils.data.DataLoader(
ds, batch_size=batch_size, num_workers=num_workers, collate_fn=lambda b: b
)
loader = make_images_loader()
rel_path, pil_img = next(iter(loader))[0]
print(rel_path, pil_img.size) # e.g. images/training_images/.../raincoat_01s.jpg (W, H)
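If you need tensors rather than PIL images, here is a hedged sketch; torchvision is assumed to be installed, and the 224x224 resize is only illustrative:

import torch
from torchvision import transforms

tfm = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
for batch in loader:                                   # batch: list of (rel_path, PIL.Image)
    paths = [p for p, _ in batch]
    imgs = torch.stack([tfm(img) for _, img in batch])
    print(imgs.shape)                                  # (batch_size, 3, 224, 224)
    break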
B) Image embeddings (.npy/.npz) → torch.Tensor
import io, numpy as np
import torch, webdataset as wds
from utils_hf_wds import hf_tar_urls
REPO = "nonarjb/alignvis"
# Heuristics for dict-like payloads
CANDIDATE_KEYS = ("embedding", "emb", "vector", "feat", "features", "clip", "image", "text")
def _first_numeric_from_npz(npz, prefer_key=None):
if prefer_key and prefer_key in npz:
return np.asarray(npz[prefer_key])
    # try direct numeric arrays
    for k in npz.files:
        try:
            a = npz[k]
        except ValueError:
            continue  # pickled entry; unreadable with allow_pickle=False
        if isinstance(a, np.ndarray) and np.issubdtype(a.dtype, np.number):
            return a
    # try dict-like entries (0-d object arrays holding a dict) with known keys
    for k in npz.files:
        try:
            a = npz[k]
        except ValueError:
            continue
        if isinstance(a, np.ndarray) and a.dtype == object and a.shape == ():
            a = a.item()
        if isinstance(a, dict):
            for ck in CANDIDATE_KEYS:
                if ck in a:
                    return np.asarray(a[ck])
    return None
def _load_numeric_vector(payload: bytes, ext: str, prefer_key: str | None = None):
    """Return a 1D float32 vector, or None if the payload is not numeric."""
    bio = io.BytesIO(payload)
    try:
        arr = np.load(bio, allow_pickle=False)
    except ValueError as e:
        # Raised both for object-dtype .npy arrays and for raw-pickle payloads;
        # only fall back to allow_pickle=True in those cases.
        if "allow_pickle" not in str(e):
            raise
        bio.seek(0)
        obj = np.load(bio, allow_pickle=True)
        if isinstance(obj, np.ndarray) and obj.dtype == object and obj.shape == ():
            obj = obj.item()  # unwrap 0-d object array holding a dict/list
        if isinstance(obj, dict):
            if prefer_key and prefer_key in obj:
                arr = obj[prefer_key]
            else:
                for ck in CANDIDATE_KEYS:
                    if ck in obj:
                        arr = obj[ck]; break
                else:
                    return None
        elif isinstance(obj, (list, tuple, np.ndarray)):
            arr = np.asarray(obj)
        else:
            return None
    # .npz payloads come back as an NpzFile archive rather than an array
    if isinstance(arr, np.lib.npyio.NpzFile):
        arr = _first_numeric_from_npz(arr, prefer_key=prefer_key)
        if arr is None:
            return None
    arr = np.asarray(arr)
    if not np.issubdtype(arr.dtype, np.number):
        try:
            arr = arr.astype(np.float32)
        except Exception:
            return None
    return arr.reshape(-1).astype(np.float32)
def make_embeddings_loader(
dataset_dir="things_eeg_2",
batch_size=64,
num_workers=4,
prefer_key: str | None = None, # e.g., "embedding" if you know the field name
):
urls = hf_tar_urls(REPO, dataset_dir, top="image_embeddings")
if not urls:
raise RuntimeError("No embedding shards found")
def pick_payload(s):
for ext in ("npy", "npz"):
if ext in s:
s["__ext__"] = ext
s["payload"] = s[ext]
s["rel_path"] = f"image_embeddings/{s['__key__']}.{ext}"
return s
return None
def decode_vec(s):
vec = _load_numeric_vector(s["payload"], s["__ext__"], prefer_key=prefer_key)
if vec is None:
# skip non-numeric payloads
return None
return (s["rel_path"], torch.from_numpy(vec))
ds = (
wds.WebDataset(urls, shardshuffle=False, handler=wds.handlers.warn_and_continue)
.map(pick_payload).select(lambda s: s is not None)
.map(decode_vec).select(lambda x: x is not None)
)
# Collate into a batch tensor; all vectors must have same dim
def collate(batch):
paths, vecs = zip(*batch)
D = vecs[0].numel()
vecs = [v.view(-1) for v in vecs if v.numel() == D]
paths = [p for (p, v) in batch if v.numel() == D]
return list(paths), torch.stack(vecs, dim=0)
return torch.utils.data.DataLoader(ds, batch_size=batch_size, num_workers=num_workers, collate_fn=collate)
# ---- try it (set num_workers=0 first if you want easier debugging) ----
if __name__ == "__main__":
paths, X = next(iter(make_embeddings_loader(num_workers=0, prefer_key=None)))
print(len(paths), X.shape)
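If the payloads are dict-like and you know which field holds the vector, pass it explicitly; the field name below is hypothetical:

# "embedding" is a hypothetical field name; use whatever your .npy/.npz files contain.
paths, X = next(iter(make_embeddings_loader(num_workers=0, prefer_key="embedding")))
print(X.shape)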
C) EEG (.npy/.npz) — stream a subject's preprocessed arrays
import io, re
import webdataset as wds
from huggingface_hub import HfFileSystem, hf_hub_url
import numpy as np
REPO_ID = "nonarjb/alignvis" # your dataset repo on HF
REVISION = "main"
DATASET_DIR = "things_eeg_2" # the folder inside the repo
def _hf_eeg_urls(repo_id=REPO_ID, dataset_dir=DATASET_DIR, revision=REVISION):
"""Collect EEG shard URLs for both possible top folders."""
fs = HfFileSystem()
urls = []
for top in ("Preprocessed_data_250Hz", "preprocessed_eeg"):
pattern = f"datasets/{repo_id}/{dataset_dir}/{dataset_dir}-{top}-*.tar"
hf_paths = sorted(fs.glob(pattern))
rel = [p.split(f"datasets/{repo_id}/", 1)[1] for p in hf_paths]
urls += [hf_hub_url(repo_id, filename=p, repo_type="dataset", revision=revision) for p in rel]
return urls
def _load_subject_eeg_from_hf(subject_id: int, split: str):
"""
Returns (subject_eeg_data, ch_names) for a given subject+split
by streaming the per-subject .npy/.npz from HF shards.
"""
urls = _hf_eeg_urls()
if not urls:
raise RuntimeError("No EEG shards found in HF repo")
filebase = "preprocessed_eeg_training" if split == "train" else "preprocessed_eeg_test"
key_prefix = f"sub-{subject_id:02d}/"
ds = wds.WebDataset(urls, shardshuffle=False)
for s in ds:
# find the per-subject file
if ("npy" in s or "npz" in s) and s["__key__"].startswith(key_prefix) and s["__key__"].endswith(filebase):
ext = "npz" if "npz" in s else "npy"
payload = s[ext]
bio = io.BytesIO(payload)
# load with safe first, fallback to pickle (original code used allow_pickle=True)
if ext == "npz":
try:
z = np.load(bio, allow_pickle=False)
except Exception:
bio.seek(0); z = np.load(bio, allow_pickle=True)
# prefer exact fields as in your original code
eeg_data = z["preprocessed_eeg_data"]
ch_names = z["ch_names"] if "ch_names" in z else None
else: # npy
try:
obj = np.load(bio, allow_pickle=False)
except ValueError:
bio.seek(0); obj = np.load(bio, allow_pickle=True)
# obj could be dict-like or 0-d object holding a dict
if isinstance(obj, dict):
eeg_data = obj["preprocessed_eeg_data"]
ch_names = obj.get("ch_names")
elif isinstance(obj, np.ndarray) and obj.dtype == object and obj.shape == ():
d = obj.item()
eeg_data = d["preprocessed_eeg_data"]
ch_names = d.get("ch_names")
else:
# if it’s already a numeric array (unlikely for your case)
eeg_data = obj
ch_names = None
return np.asarray(eeg_data), ch_names
raise FileNotFoundError(f"Subject file not found in HF shards: {key_prefix}{filebase}.(npy|npz)")
subject_eeg_data, ch_names = _load_subject_eeg_from_hf(subject_id=1, split="train")
print(subject_eeg_data.shape)
print(ch_names)
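To feed this into a model, a minimal conversion sketch; the array layout is whatever was stored, so check the printed shape rather than assuming one:

import numpy as np
import torch

# Only valid if the stored array is numeric; object-dtype data needs the
# resaving / allow_pickle note below.
eeg = torch.as_tensor(np.asarray(subject_eeg_data, dtype=np.float32))
print(eeg.shape, eeg.dtype)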
If some .npy files were saved as object-dtype, resave them as numeric arrays; otherwise you must load with allow_pickle=True (only if you trust the data).
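A hedged sketch of that resaving step (the file path is illustrative; the "preprocessed_eeg_data" field matches the loader above):

import numpy as np

# Only do this for files whose contents you trust.
obj = np.load("path/to/offending_file.npy", allow_pickle=True)
d = obj.item() if isinstance(obj, np.ndarray) and obj.dtype == object else obj
np.save("path/to/offending_file_numeric.npy",
        np.asarray(d["preprocessed_eeg_data"], dtype=np.float32))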
D) Download, untar, and use locally (byte-identical files)
# 1) Download the dataset subtree
from huggingface_hub import snapshot_download
local_root = snapshot_download(
"nonarjb/alignvis", repo_type="dataset", allow_patterns=["things_eeg_2/**"]
)
# 2) Untar to a restore directory (keys preserved under each top folder)
import tarfile, glob, pathlib
restore_root = pathlib.Path("./restore/things_eeg_2")
for top in ("images", "image_embeddings", "preprocessed_eeg"):
(restore_root / top).mkdir(parents=True, exist_ok=True)
for t in glob.glob(f"{local_root}/things_eeg_2/things_eeg_2-{top}-*.tar"):
with tarfile.open(t) as tf:
tf.extractall(restore_root / top)
print("Restored under:", restore_root)
Now the folder tree mirrors the original:
# Example local usage
from PIL import Image
import numpy as np
img = Image.open("./restore/things_eeg_2/images/training_images/01133_raincoat/raincoat_01s.jpg")
vec = np.load("./restore/things_eeg_2/image_embeddings/some/file.npy")
eeg = np.load("./restore/things_eeg_2/preprocessed_eeg/s01/run3/segment_0001.npy", allow_pickle=False)
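You can also skip the untar step: WebDataset reads the downloaded shards straight from local paths (a hedged sketch reusing local_root from step 1):

import glob
import webdataset as wds

local_shards = sorted(glob.glob(f"{local_root}/things_eeg_2/things_eeg_2-images-*.tar"))
ds = wds.WebDataset(local_shards, shardshuffle=False)
print(sorted(next(iter(ds)).keys()))  # e.g. ['__key__', '__url__', 'jpg']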
Notes
- WebDataset can also read local shards by passing file:// URLs instead of https://.
- If your shards are named differently, tweak hf_tar_urls(..., top="...") and the rel_path prefixes (images/, image_embeddings/, preprocessed_eeg/).
- To batch EEG tensors, implement padding in the collate function (see the sketch below).
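A hedged sketch of such a padding collate, assuming each item is a (rel_path, array) pair whose last axis is time and whose other dimensions already match:

import torch
import torch.nn.functional as F

def pad_collate(batch):
    """Zero-pad ragged EEG segments along the last (time) axis and stack them."""
    paths, arrs = zip(*batch)
    tensors = [torch.as_tensor(a, dtype=torch.float32) for a in arrs]
    t_max = max(t.shape[-1] for t in tensors)
    padded = torch.stack([F.pad(t, (0, t_max - t.shape[-1])) for t in tensors])
    return list(paths), padded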