# README — Loading `things_eeg_2` from `nonarjb/alignvis`

This repo hosts WebDataset shard sets under `things_eeg_2/`:

* `things_eeg_2-images-*.tar` — images
* `things_eeg_2-image_embeddings-*.tar` — vector embeddings (`.npy/.npz`)
* `things_eeg_2-preprocessed_eeg-*.tar` — EEG arrays (`.npy/.npz`)

Inside each shard, the WebDataset `__key__` is the file’s **relative path under the top folder (without extension)**.
To reconstruct the original relative path, use:

```
rel_path = "<top>/" + __key__ + "." + <ext>
```

(e.g., `images/training_images/01133_raincoat/raincoat_01s.jpg`)
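For instance, the mapping above works out as plain string concatenation (illustrative values only):

```python
# Illustrative values only: how __key__ maps back to the original relative path.
top = "images"
key = "training_images/01133_raincoat/raincoat_01s"   # the sample's __key__
ext = "jpg"
rel_path = f"{top}/{key}.{ext}"
print(rel_path)  # images/training_images/01133_raincoat/raincoat_01s.jpg
```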

> To use the **other dataset** (`things_meg`), just replace `dataset_dir="things_eeg_2"` with `dataset_dir="things_meg"` in the examples below.

---

## Install

```bash
pip install webdataset huggingface_hub pillow torch tqdm
# Optional: faster transfers for big files
pip install -U hf_transfer && export HF_HUB_ENABLE_HF_TRANSFER=1
```

---

## Helper: list shard URLs from the Hub

Create `utils_hf_wds.py`:

```python
# utils_hf_wds.py
from huggingface_hub import HfFileSystem, hf_hub_url

def hf_tar_urls(repo_id: str, dataset_dir: str, top: str, revision: str = "main"):
    """
    Return sorted 'resolve/<revision>' URLs for shards matching:
        <dataset_dir>/<dataset_dir>-<top>-*.tar
    Example: things_eeg_2/things_eeg_2-images-000000.tar
    """
    fs = HfFileSystem()
    pattern = f"datasets/{repo_id}/{dataset_dir}/{dataset_dir}-{top}-*.tar"
    hf_paths = sorted(fs.glob(pattern))  # hf://datasets/<repo_id>/...
    rel_paths = [p.split(f"datasets/{repo_id}/", 1)[1] for p in hf_paths]
    return [
        hf_hub_url(repo_id, filename=p, repo_type="dataset", revision=revision)
        for p in rel_paths
    ]
```
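
A quick sanity check of the helper (the shard count depends on how the repo is currently sharded):

```python
from utils_hf_wds import hf_tar_urls

urls = hf_tar_urls("nonarjb/alignvis", dataset_dir="things_eeg_2", top="images")
print(f"found {len(urls)} image shard(s)")
if urls:
    print(urls[0])  # a 'resolve/<revision>' URL that WebDataset can stream directly
```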

---

## A) Images (PIL) with original relative paths

```python
import io
from PIL import Image
import torch, webdataset as wds
from utils_hf_wds import hf_tar_urls

REPO = "nonarjb/alignvis"

def make_images_loader(dataset_dir="things_eeg_2", batch_size=16, num_workers=4):
    urls = hf_tar_urls(REPO, dataset_dir, top="images")
    if not urls:
        raise RuntimeError("No image shards found")

    def pick_image(s):
        for ext in ("jpg","jpeg","png"):
            if ext in s:
                s["img_bytes"] = s[ext]
                s["rel_path"] = f"images/{s['__key__']}.{ext}"
                return s
        return None

    ds = (wds.WebDataset(urls, shardshuffle=False, handler=wds.handlers.warn_and_continue)
            .map(pick_image).select(lambda s: s is not None)
            .map(lambda s: (s["rel_path"], Image.open(io.BytesIO(s["img_bytes"])).convert("RGB"))))

    return torch.utils.data.DataLoader(
        ds, batch_size=batch_size, num_workers=num_workers, collate_fn=lambda b: b
    )

loader = make_images_loader()
rel_path, pil_img = next(iter(loader))[0]
print(rel_path, pil_img.size)  # e.g. images/training_images/.../raincoat_01s.jpg (W, H)
```
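
If you want batched tensors instead of PIL images, here is a minimal sketch; it assumes all images in a batch share the same size (add a resize step otherwise):

```python
import numpy as np
import torch

batch = next(iter(loader))              # list of (rel_path, PIL.Image) pairs
paths, imgs = zip(*batch)
x = torch.stack([
    torch.from_numpy(np.asarray(im)).permute(2, 0, 1).float() / 255.0  # HWC uint8 -> CHW float in [0, 1]
    for im in imgs
])
print(len(paths), x.shape)              # e.g. 16, torch.Size([16, 3, H, W])
```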

---

## B) Image embeddings (`.npy/.npz`) → `torch.Tensor`

```python
import io, numpy as np
import torch, webdataset as wds
from utils_hf_wds import hf_tar_urls

REPO = "nonarjb/alignvis"

# Heuristics for dict-like payloads
CANDIDATE_KEYS = ("embedding", "emb", "vector", "feat", "features", "clip", "image", "text")

def _first_numeric_from_npz(npz, prefer_key=None):
    if prefer_key and prefer_key in npz:
        return np.asarray(npz[prefer_key])
    # first pass: any directly numeric array
    for k in npz.files:
        a = npz[k]
        if isinstance(a, np.ndarray) and np.issubdtype(a.dtype, np.number):
            return a
    # second pass: 0-d object entries wrapping a dict with a known field name
    for k in npz.files:
        a = npz[k]
        if isinstance(a, np.ndarray) and a.dtype == object and a.shape == ():
            a = a.item()
        if isinstance(a, dict):
            for ck in CANDIDATE_KEYS:
                if ck in a:
                    return np.asarray(a[ck])
    return None

def _load_numeric_vector(payload: bytes, ext: str, prefer_key: str | None = None):
    """Return a 1D float32 vector, or None if the payload is not numeric."""
    bio = io.BytesIO(payload)
    if ext == "npz":
        # .npz archives: pick a numeric member via the helper above
        try:
            arr = _first_numeric_from_npz(np.load(bio, allow_pickle=False), prefer_key)
        except ValueError:  # pickled members need allow_pickle=True
            bio.seek(0)
            arr = _first_numeric_from_npz(np.load(bio, allow_pickle=True), prefer_key)
        if arr is None:
            return None
    else:  # .npy
        try:
            arr = np.load(bio, allow_pickle=False)
        except ValueError as e:
            if "Object arrays" not in str(e):
                raise
            bio.seek(0)
            obj = np.load(bio, allow_pickle=True)
            # np.save(path, some_dict) round-trips as a 0-d object array; unwrap it
            if isinstance(obj, np.ndarray) and obj.dtype == object and obj.shape == ():
                obj = obj.item()
            if isinstance(obj, dict):
                keys = ([prefer_key] if prefer_key else []) + list(CANDIDATE_KEYS)
                for ck in keys:
                    if ck in obj:
                        arr = np.asarray(obj[ck]); break
                else:
                    return None
            elif isinstance(obj, (list, tuple)):
                arr = np.asarray(obj)
            else:
                return None
    arr = np.asarray(arr)
    if not np.issubdtype(arr.dtype, np.number):
        try:
            arr = arr.astype(np.float32)
        except Exception:
            return None
    return arr.reshape(-1).astype(np.float32)

def make_embeddings_loader(
    dataset_dir="things_eeg_2",
    batch_size=64,
    num_workers=4,
    prefer_key: str | None = None,   # e.g., "embedding" if you know the field name
):
    urls = hf_tar_urls(REPO, dataset_dir, top="image_embeddings")
    if not urls:
        raise RuntimeError("No embedding shards found")

    def pick_payload(s):
        for ext in ("npy", "npz"):
            if ext in s:
                s["__ext__"] = ext
                s["payload"] = s[ext]
                s["rel_path"] = f"image_embeddings/{s['__key__']}.{ext}"
                return s
        return None

    def decode_vec(s):
        vec = _load_numeric_vector(s["payload"], s["__ext__"], prefer_key=prefer_key)
        if vec is None:
            # skip non-numeric payloads
            return None
        return (s["rel_path"], torch.from_numpy(vec))

    ds = (
        wds.WebDataset(urls, shardshuffle=False, handler=wds.handlers.warn_and_continue)
        .map(pick_payload).select(lambda s: s is not None)
        .map(decode_vec).select(lambda x: x is not None)
    )

    # Collate into a batch tensor; vectors whose dim differs from the first one are dropped
    def collate(batch):
        paths, vecs = zip(*batch)
        D = vecs[0].numel()
        vecs = [v.view(-1) for v in vecs if v.numel() == D]
        paths = [p for (p, v) in batch if v.numel() == D]
        return list(paths), torch.stack(vecs, dim=0)

    return torch.utils.data.DataLoader(ds, batch_size=batch_size, num_workers=num_workers, collate_fn=collate)

# ---- try it (set num_workers=0 first if you want easier debugging) ----
if __name__ == "__main__":
    paths, X = next(iter(make_embeddings_loader(num_workers=0, prefer_key=None)))
    print(len(paths), X.shape)

```

---

## C) EEG (`.npy/.npz`) — ragged-friendly (returns list of arrays)

```python
import io, re
import webdataset as wds
from huggingface_hub import HfFileSystem, hf_hub_url
import numpy as np

REPO_ID = "nonarjb/alignvis"   # the dataset repo on the Hub
REVISION = "main"
DATASET_DIR = "things_eeg_2"   # the folder inside the repo

def _hf_eeg_urls(repo_id=REPO_ID, dataset_dir=DATASET_DIR, revision=REVISION):
    """Collect EEG shard URLs for both possible top folders."""
    fs = HfFileSystem()
    urls = []
    for top in ("Preprocessed_data_250Hz", "preprocessed_eeg"):
        pattern = f"datasets/{repo_id}/{dataset_dir}/{dataset_dir}-{top}-*.tar"
        hf_paths = sorted(fs.glob(pattern))
        rel = [p.split(f"datasets/{repo_id}/", 1)[1] for p in hf_paths]
        urls += [hf_hub_url(repo_id, filename=p, repo_type="dataset", revision=revision) for p in rel]
    return urls

def _load_subject_eeg_from_hf(subject_id: int, split: str):
    """
    Returns (subject_eeg_data, ch_names) for a given subject+split
    by streaming the per-subject .npy/.npz from HF shards.
    """
    urls = _hf_eeg_urls()
    if not urls:
        raise RuntimeError("No EEG shards found in HF repo")
    filebase = "preprocessed_eeg_training" if split == "train" else "preprocessed_eeg_test"
    key_prefix = f"sub-{subject_id:02d}/"

    ds = wds.WebDataset(urls, shardshuffle=False)
    for s in ds:
        # find the per-subject file
        if ("npy" in s or "npz" in s) and s["__key__"].startswith(key_prefix) and s["__key__"].endswith(filebase):
            ext = "npz" if "npz" in s else "npy"
            payload = s[ext]
            bio = io.BytesIO(payload)

            # try a safe load first, then fall back to pickle (the original files may need allow_pickle=True)
            if ext == "npz":
                try:
                    z = np.load(bio, allow_pickle=False)
                except Exception:
                    bio.seek(0); z = np.load(bio, allow_pickle=True)
                # use the exact field names stored in the file
                eeg_data = z["preprocessed_eeg_data"]
                ch_names = z["ch_names"] if "ch_names" in z else None
            else:  # npy
                try:
                    obj = np.load(bio, allow_pickle=False)
                except ValueError:
                    bio.seek(0); obj = np.load(bio, allow_pickle=True)

                # obj could be dict-like or 0-d object holding a dict
                if isinstance(obj, dict):
                    eeg_data = obj["preprocessed_eeg_data"]
                    ch_names = obj.get("ch_names")
                elif isinstance(obj, np.ndarray) and obj.dtype == object and obj.shape == ():
                    d = obj.item()
                    eeg_data = d["preprocessed_eeg_data"]
                    ch_names = d.get("ch_names")
                else:
                    # if it's already a plain numeric array (unlikely here)
                    eeg_data = obj
                    ch_names = None

            return np.asarray(eeg_data), ch_names

    raise FileNotFoundError(f"Subject file not found in HF shards: {key_prefix}{filebase}.(npy|npz)")


subject_eeg_data, ch_names = _load_subject_eeg_from_hf(subject_id=1, split="train")
print(subject_eeg_data.shape)
print(ch_names)
```

> If some `.npy` files were saved with **object dtype**, consider resaving them as plain numeric arrays; otherwise they can only be loaded with `allow_pickle=True` (do that only if you trust the data).
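
A one-off conversion could look like the sketch below; `some_embedding.npy` and the `"embedding"` field name are placeholders for whatever your files actually contain:

```python
import numpy as np

obj = np.load("some_embedding.npy", allow_pickle=True)   # only if you trust the file
if isinstance(obj, np.ndarray) and obj.dtype == object and obj.shape == ():
    obj = obj.item()                                      # unwrap a pickled dict/list
arr = np.asarray(obj["embedding"] if isinstance(obj, dict) else obj, dtype=np.float32)
np.save("some_embedding_numeric.npy", arr)                # now loads with allow_pickle=False
```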

---

## D) Download, **untar**, and use locally (byte-identical files)

```python
# 1) Download the dataset subtree
from huggingface_hub import snapshot_download
local_root = snapshot_download(
    "nonarjb/alignvis", repo_type="dataset", allow_patterns=["things_eeg_2/**"]
)

# 2) Untar to a restore directory (keys preserved under each top folder)
import tarfile, glob, pathlib

restore_root = pathlib.Path("./restore/things_eeg_2")

for top in ("images", "image_embeddings", "preprocessed_eeg"):
    (restore_root / top).mkdir(parents=True, exist_ok=True)
    for t in glob.glob(f"{local_root}/things_eeg_2/things_eeg_2-{top}-*.tar"):
        with tarfile.open(t) as tf:
            tf.extractall(restore_root / top)

print("Restored under:", restore_root)
```
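
Optionally, a quick sanity check that just counts the restored files per top folder:

```python
import pathlib

restore_root = pathlib.Path("./restore/things_eeg_2")
for top in ("images", "image_embeddings", "preprocessed_eeg"):
    n_files = sum(1 for p in (restore_root / top).rglob("*") if p.is_file())
    print(f"{top}: {n_files} files")
```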

Now the folder tree mirrors the original:

```python
# Example local usage
from PIL import Image
import numpy as np

img = Image.open("./restore/things_eeg_2/images/training_images/01133_raincoat/raincoat_01s.jpg")
vec = np.load("./restore/things_eeg_2/image_embeddings/some/file.npy")
eeg = np.load("./restore/things_eeg_2/preprocessed_eeg/s01/run3/segment_0001.npy", allow_pickle=False)
```

---

### Notes

* WebDataset can also read **local** shards by passing `file://` URLs instead of `https://`.
* If your shards are named differently, tweak `hf_tar_urls(..., top="...")` and the `rel_path` prefixes (`images/`, `image_embeddings/`, `preprocessed_eeg/`).
* To batch EEG arrays of different lengths, implement padding in a custom `collate_fn` (see the sketch below).
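
For the last point, a minimal padding collate might look like this (a sketch only; it assumes each item is a `(rel_path, np.ndarray)` pair and that the arrays differ only along the last, time, axis):

```python
import numpy as np
import torch

def pad_collate(batch):
    """Zero-pad EEG arrays along the last axis so they can be stacked into one tensor."""
    paths, arrs = zip(*batch)
    max_t = max(a.shape[-1] for a in arrs)
    padded = []
    for a in arrs:
        pad = [(0, 0)] * (a.ndim - 1) + [(0, max_t - a.shape[-1])]
        padded.append(np.pad(a, pad))
    return list(paths), torch.from_numpy(np.stack(padded)).float()
```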