README — Loading things_eeg_2 from nonarjb/alignvis

This repo hosts WebDataset shard sets under `things_eeg_2/`:

- `things_eeg_2-images-*.tar` — images
- `things_eeg_2-image_embeddings-*.tar` — vector embeddings (`.npy`/`.npz`)
- `things_eeg_2-preprocessed_eeg-*.tar` — EEG arrays (`.npy`/`.npz`)
Inside each shard, the WebDataset `__key__` is the file's relative path under the top folder (without extension). To reconstruct the original relative path, use:

    rel_path = "<top>/" + __key__ + "." + <ext>

(e.g., `images/training_images/01133_raincoat/raincoat_01s.jpg`)
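As a quick sanity check, here is a minimal sketch that streams one sample from the first image shard and rebuilds its relative path (it uses the `hf_tar_urls` helper defined further below):

```python
# Sketch: rebuild the original relative path for one streamed image sample.
import webdataset as wds
from utils_hf_wds import hf_tar_urls  # helper defined in the "Helper" section below

urls = hf_tar_urls("nonarjb/alignvis", "things_eeg_2", top="images")
sample = next(iter(wds.WebDataset(urls[:1], shardshuffle=False)))
ext = next(e for e in ("jpg", "jpeg", "png") if e in sample)
print("images/" + sample["__key__"] + "." + ext)
```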
To use the other dataset (`things_meg`), just replace `dataset_dir="things_eeg_2"` with `dataset_dir="things_meg"` in the examples below.
Install
```bash
pip install webdataset huggingface_hub pillow torch tqdm
# Optional: faster transfers for big files
pip install -U hf_transfer && export HF_HUB_ENABLE_HF_TRANSFER=1
```
Helper: list shard URLs from the Hub
Create `utils_hf_wds.py`:
```python
# utils_hf_wds.py
from huggingface_hub import HfFileSystem, hf_hub_url

def hf_tar_urls(repo_id: str, dataset_dir: str, top: str, revision: str = "main"):
    """
    Return sorted 'resolve/<revision>' URLs for shards matching:
        <dataset_dir>/<dataset_dir>-<top>-*.tar
    Example: things_eeg_2/things_eeg_2-images-000000.tar
    """
    fs = HfFileSystem()
    pattern = f"datasets/{repo_id}/{dataset_dir}/{dataset_dir}-{top}-*.tar"
    hf_paths = sorted(fs.glob(pattern))  # hf://datasets/<repo_id>/...
    rel_paths = [p.split(f"datasets/{repo_id}/", 1)[1] for p in hf_paths]
    return [
        hf_hub_url(repo_id, filename=p, repo_type="dataset", revision=revision)
        for p in rel_paths
    ]
```
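A quick usage check (the shard count and exact names depend on how the repo was uploaded):

```python
# Sketch: sanity-check the helper by listing image shard URLs.
from utils_hf_wds import hf_tar_urls

urls = hf_tar_urls("nonarjb/alignvis", "things_eeg_2", top="images")
print(len(urls), "shards found")
print(urls[0])  # e.g. ends in .../things_eeg_2/things_eeg_2-images-000000.tar
```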
A) Images (PIL) with original relative paths
```python
import io
from PIL import Image
import torch, webdataset as wds
from utils_hf_wds import hf_tar_urls

REPO = "nonarjb/alignvis"

def make_images_loader(dataset_dir="things_eeg_2", batch_size=16, num_workers=4):
    urls = hf_tar_urls(REPO, dataset_dir, top="images")
    if not urls:
        raise RuntimeError("No image shards found")

    def pick_image(s):
        for ext in ("jpg", "jpeg", "png"):
            if ext in s:
                s["img_bytes"] = s[ext]
                s["rel_path"] = f"images/{s['__key__']}.{ext}"
                return s
        return None

    ds = (wds.WebDataset(urls, shardshuffle=False, handler=wds.handlers.warn_and_continue)
          .map(pick_image).select(lambda s: s is not None)
          .map(lambda s: (s["rel_path"], Image.open(io.BytesIO(s["img_bytes"])).convert("RGB"))))
    return torch.utils.data.DataLoader(
        ds, batch_size=batch_size, num_workers=num_workers, collate_fn=lambda b: b
    )

loader = make_images_loader()
rel_path, pil_img = next(iter(loader))[0]
print(rel_path, pil_img.size)  # e.g. images/training_images/.../raincoat_01s.jpg (W, H)
```
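If you want stacked image tensors rather than PIL objects, here is a minimal variant. It is a sketch, not part of the loader above: torchvision and the 224×224 target size are assumptions, adjust them to your model.

```python
# Sketch: decode straight to fixed-size tensors so the default collate
# can stack them. Assumes torchvision is installed.
import torch, webdataset as wds
from torchvision import transforms
from utils_hf_wds import hf_tar_urls

to_tensor = transforms.Compose([
    transforms.Resize((224, 224)),  # assumed target size
    transforms.ToTensor(),
])

urls = hf_tar_urls("nonarjb/alignvis", "things_eeg_2", top="images")
ds = (wds.WebDataset(urls, shardshuffle=False)
      .decode("pil")                        # built-in WebDataset image decoding
      .to_tuple("__key__", "jpg;jpeg;png")  # first matching image field
      .map_tuple(lambda k: f"images/{k}", to_tensor))
loader = torch.utils.data.DataLoader(ds, batch_size=16)
paths, imgs = next(iter(loader))
print(imgs.shape)  # e.g. torch.Size([16, 3, 224, 224])
```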
B) Image embeddings (`.npy`/`.npz`) → torch.Tensor
```python
import io, numpy as np
import torch, webdataset as wds
from utils_hf_wds import hf_tar_urls

REPO = "nonarjb/alignvis"

# Heuristics for dict-like payloads
CANDIDATE_KEYS = ("embedding", "emb", "vector", "feat", "features", "clip", "image", "text")

def _first_numeric_from_npz(npz, prefer_key=None):
    if prefer_key and prefer_key in npz:
        return np.asarray(npz[prefer_key])
    # try direct numeric arrays
    for k in npz.files:
        a = npz[k]
        if isinstance(a, np.ndarray) and np.issubdtype(a.dtype, np.number):
            return a
    # try dict-like entries with known keys (pickled dicts arrive as 0-d object arrays)
    for k in npz.files:
        a = npz[k]
        if isinstance(a, np.ndarray) and a.dtype == object and a.shape == ():
            a = a.item()
        if isinstance(a, dict):
            for ck in CANDIDATE_KEYS:
                if ck in a:
                    return np.asarray(a[ck])
    return None

def _load_numeric_vector(payload: bytes, ext: str, prefer_key: str | None = None):
    """Return 1D float32 vector or None if not numeric."""
    bio = io.BytesIO(payload)
    if ext == "npz":
        # safe load first; fall back to pickle only if entry access requires it
        try:
            arr = _first_numeric_from_npz(np.load(bio, allow_pickle=False), prefer_key=prefer_key)
        except ValueError:
            bio.seek(0)
            arr = _first_numeric_from_npz(np.load(bio, allow_pickle=True), prefer_key=prefer_key)
        if arr is None:
            return None
    else:  # npy
        try:
            arr = np.load(bio, allow_pickle=False)
        except ValueError as e:
            if "Object arrays" not in str(e):
                raise
            bio.seek(0)
            obj = np.load(bio, allow_pickle=True)
            if isinstance(obj, np.ndarray) and obj.dtype == object and obj.shape == ():
                obj = obj.item()  # unwrap a 0-d object array holding the payload
            if isinstance(obj, dict):
                for ck in CANDIDATE_KEYS:
                    if ck in obj:
                        arr = obj[ck]
                        break
                else:
                    return None
            elif isinstance(obj, (list, tuple)):
                arr = np.asarray(obj)
            else:
                return None
    arr = np.asarray(arr)
    if not np.issubdtype(arr.dtype, np.number):
        try:
            arr = arr.astype(np.float32)
        except Exception:
            return None
    return arr.reshape(-1).astype(np.float32)

def make_embeddings_loader(
    dataset_dir="things_eeg_2",
    batch_size=64,
    num_workers=4,
    prefer_key: str | None = None,  # e.g., "embedding" if you know the field name
):
    urls = hf_tar_urls(REPO, dataset_dir, top="image_embeddings")
    if not urls:
        raise RuntimeError("No embedding shards found")

    def pick_payload(s):
        for ext in ("npy", "npz"):
            if ext in s:
                s["__ext__"] = ext
                s["payload"] = s[ext]
                s["rel_path"] = f"image_embeddings/{s['__key__']}.{ext}"
                return s
        return None

    def decode_vec(s):
        vec = _load_numeric_vector(s["payload"], s["__ext__"], prefer_key=prefer_key)
        if vec is None:
            return None  # skip non-numeric payloads
        return (s["rel_path"], torch.from_numpy(vec))

    ds = (
        wds.WebDataset(urls, shardshuffle=False, handler=wds.handlers.warn_and_continue)
        .map(pick_payload).select(lambda s: s is not None)
        .map(decode_vec).select(lambda x: x is not None)
    )

    # Collate into a batch tensor; vectors whose dim differs from the first are dropped
    def collate(batch):
        D = batch[0][1].numel()
        keep = [(p, v.view(-1)) for (p, v) in batch if v.numel() == D]
        paths, vecs = zip(*keep)
        return list(paths), torch.stack(vecs, dim=0)

    return torch.utils.data.DataLoader(ds, batch_size=batch_size, num_workers=num_workers, collate_fn=collate)

# ---- try it (set num_workers=0 first if you want easier debugging) ----
if __name__ == "__main__":
    paths, X = next(iter(make_embeddings_loader(num_workers=0, prefer_key=None)))
    print(len(paths), X.shape)
```
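If you don't know which field name to pass as `prefer_key`, one way to find out (a sketch, separate from the loader above) is to stream a single sample and list its `.npz` keys:

```python
# Sketch: inspect one embedding payload to discover its field names.
import io
import numpy as np
import webdataset as wds
from utils_hf_wds import hf_tar_urls

urls = hf_tar_urls("nonarjb/alignvis", "things_eeg_2", top="image_embeddings")
for s in wds.WebDataset(urls[:1], shardshuffle=False):
    if "npz" in s:
        z = np.load(io.BytesIO(s["npz"]), allow_pickle=True)  # only for data you trust
        print(s["__key__"], "->", z.files)  # candidate values for prefer_key
        break
    if "npy" in s:
        print(s["__key__"], "-> plain .npy (no field names)")
        break
```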
C) EEG (`.npy`/`.npz`) — stream one subject's preprocessed arrays
```python
import io
import numpy as np
import webdataset as wds
from huggingface_hub import HfFileSystem, hf_hub_url

REPO_ID = "nonarjb/alignvis"  # your dataset repo on HF
REVISION = "main"
DATASET_DIR = "things_eeg_2"  # the folder inside the repo

def _hf_eeg_urls(repo_id=REPO_ID, dataset_dir=DATASET_DIR, revision=REVISION):
    """Collect EEG shard URLs for both possible top folders."""
    fs = HfFileSystem()
    urls = []
    for top in ("Preprocessed_data_250Hz", "preprocessed_eeg"):
        pattern = f"datasets/{repo_id}/{dataset_dir}/{dataset_dir}-{top}-*.tar"
        hf_paths = sorted(fs.glob(pattern))
        rel = [p.split(f"datasets/{repo_id}/", 1)[1] for p in hf_paths]
        urls += [hf_hub_url(repo_id, filename=p, repo_type="dataset", revision=revision) for p in rel]
    return urls

def _load_subject_eeg_from_hf(subject_id: int, split: str):
    """
    Returns (subject_eeg_data, ch_names) for a given subject+split
    by streaming the per-subject .npy/.npz from HF shards.
    """
    urls = _hf_eeg_urls()
    if not urls:
        raise RuntimeError("No EEG shards found in HF repo")
    filebase = "preprocessed_eeg_training" if split == "train" else "preprocessed_eeg_test"
    key_prefix = f"sub-{subject_id:02d}/"
    ds = wds.WebDataset(urls, shardshuffle=False)
    for s in ds:
        # find the per-subject file
        if ("npy" in s or "npz" in s) and s["__key__"].startswith(key_prefix) and s["__key__"].endswith(filebase):
            ext = "npz" if "npz" in s else "npy"
            payload = s[ext]
            bio = io.BytesIO(payload)
            # try a safe load first, then fall back to pickle
            # (the original files were saved with allow_pickle=True)
            if ext == "npz":
                try:
                    z = np.load(bio, allow_pickle=False)
                    eeg_data = z["preprocessed_eeg_data"]  # raises if the entry is pickled
                except Exception:
                    bio.seek(0)
                    z = np.load(bio, allow_pickle=True)
                    eeg_data = z["preprocessed_eeg_data"]  # exact field name used when saving
                ch_names = z["ch_names"] if "ch_names" in z else None
            else:  # npy
                try:
                    obj = np.load(bio, allow_pickle=False)
                except ValueError:
                    bio.seek(0)
                    obj = np.load(bio, allow_pickle=True)
                # obj could be dict-like or a 0-d object array holding a dict
                if isinstance(obj, dict):
                    eeg_data = obj["preprocessed_eeg_data"]
                    ch_names = obj.get("ch_names")
                elif isinstance(obj, np.ndarray) and obj.dtype == object and obj.shape == ():
                    d = obj.item()
                    eeg_data = d["preprocessed_eeg_data"]
                    ch_names = d.get("ch_names")
                else:
                    # if it's already a numeric array (unlikely for this dataset)
                    eeg_data = obj
                    ch_names = None
            return np.asarray(eeg_data), ch_names
    raise FileNotFoundError(f"Subject file not found in HF shards: {key_prefix}{filebase}.(npy|npz)")

subject_eeg_data, ch_names = _load_subject_eeg_from_hf(subject_id=1, split="train")
print(subject_eeg_data.shape)
print(ch_names)
```
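To feed the subject array into a model, a minimal sketch that converts it to a float32 torch tensor. The axis layout (e.g. conditions × repetitions × channels × time) is an assumption here; verify it against `subject_eeg_data.shape` before averaging.

```python
# Sketch: turn the subject's EEG into a torch tensor. The axis meanings
# are an assumption -- check them against your preprocessing pipeline.
import torch

eeg = torch.from_numpy(subject_eeg_data).float()
print(eeg.shape)
if eeg.ndim == 4:
    eeg_avg = eeg.mean(dim=1)  # average over the assumed repetition axis
    print(eeg_avg.shape)
```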
If some `.npy` files were saved with object dtype, resave them as numeric arrays; otherwise you must load them with `allow_pickle=True` (only if you trust the data).
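A minimal resave sketch, assuming each object-dtype file wraps a single numeric array (`old.npy`/`new.npy` are placeholder paths):

```python
# Sketch: rewrite an object-dtype .npy as a plain numeric array.
import numpy as np

obj = np.load("old.npy", allow_pickle=True)  # only for data you trust
if obj.dtype == object and obj.shape == ():
    obj = obj.item()  # unwrap a 0-d object array
arr = np.asarray(obj, dtype=np.float32)  # fails loudly if not numeric
np.save("new.npy", arr)  # now reloadable with allow_pickle=False
```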
D) Download, untar, and use locally (byte-identical files)
```python
# 1) Download the dataset subtree
from huggingface_hub import snapshot_download

local_root = snapshot_download(
    "nonarjb/alignvis", repo_type="dataset", allow_patterns=["things_eeg_2/**"]
)

# 2) Untar to a restore directory (keys preserved under each top folder)
import tarfile, glob, pathlib

restore_root = pathlib.Path("./restore/things_eeg_2")
for top in ("images", "image_embeddings", "preprocessed_eeg"):
    (restore_root / top).mkdir(parents=True, exist_ok=True)
    for t in glob.glob(f"{local_root}/things_eeg_2/things_eeg_2-{top}-*.tar"):
        with tarfile.open(t) as tf:
            tf.extractall(restore_root / top)
print("Restored under:", restore_root)
```
Now the folder tree mirrors the original:
```python
# Example local usage
from PIL import Image
import numpy as np

img = Image.open("./restore/things_eeg_2/images/training_images/01133_raincoat/raincoat_01s.jpg")
vec = np.load("./restore/things_eeg_2/image_embeddings/some/file.npy")
eeg = np.load("./restore/things_eeg_2/preprocessed_eeg/s01/run3/segment_0001.npy", allow_pickle=False)
```
Notes

- WebDataset can also read local shards by passing `file://` URLs instead of `https://`.
- If your shards are named differently, tweak `hf_tar_urls(..., top="...")` and the `rel_path` prefixes (`images/`, `image_embeddings/`, `preprocessed_eeg/`).
- To batch EEG tensors, implement padding in the `collate` function; see the sketch below.
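A minimal padding collate, assuming each sample is a `(rel_path, tensor)` pair whose tensors agree on every dimension except the last (time) axis:

```python
# Sketch: pad ragged EEG tensors along the last (time) axis so they stack.
import torch
import torch.nn.functional as F

def pad_collate(batch):
    paths, tensors = zip(*batch)
    T = max(t.shape[-1] for t in tensors)  # longest time axis in the batch
    padded = [F.pad(t, (0, T - t.shape[-1])) for t in tensors]  # right-pad with zeros
    return list(paths), torch.stack(padded, dim=0)
```

Pass it to the loader as `collate_fn=pad_collate`.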