zhangfz committed
Commit db22f07 · 1 Parent(s): c33c288
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. logs_svd_gated/mode_13_param_gated_seed_41/config.json +25 -0
  2. logs_svd_gated/mode_13_param_gated_seed_41/training_log_53ddf4cc-e033-4acf-bc2d-5f9f5c822ce1.txt +0 -0
  3. logs_svd_gated/mode_13_param_gated_seed_41/training_log_d270cd1a-a1f4-441e-a235-1836f2598c11.txt +1760 -0
  4. logs_svd_gated/mode_13_param_gated_seed_42/config.json +25 -0
  5. logs_svd_gated/mode_13_param_gated_seed_42/training_log_29ca794e-db48-4228-89b9-294e22f93633.txt +0 -0
  6. logs_svd_gated/mode_13_param_gated_seed_43/config.json +25 -0
  7. logs_svd_gated/mode_13_param_gated_seed_43/training_log_9ef1b43c-d8df-464d-9246-7f66cf8bbaee.txt +0 -0
  8. logs_svd_gated/mode_13_param_gated_seed_44/config.json +25 -0
  9. logs_svd_gated/mode_13_param_gated_seed_44/training_log_46d4e9f2-2b76-454e-bfe1-cd91263cd3ea.txt +0 -0
  10. logs_svd_gated/mode_13_param_gated_seed_45/config.json +25 -0
  11. logs_svd_gated/mode_13_param_gated_seed_45/training_log_78f20870-5eda-4682-aced-8cedd91a0415.txt +0 -0
  12. logs_svd_gated/mode_13_param_gated_seed_46/config.json +25 -0
  13. logs_svd_gated/mode_13_param_gated_seed_46/training_log_f15d7967-d463-4726-99a0-e07de412ca4e.txt +0 -0
  14. logs_svd_gated/mode_13_param_gated_seed_47/config.json +25 -0
  15. logs_svd_gated/mode_13_param_gated_seed_47/training_log_3f513c1c-b909-494f-92a7-f9975950351b.txt +0 -0
  16. logs_svd_gated/mode_13_param_gated_seed_48/config.json +25 -0
  17. logs_svd_gated/mode_13_param_gated_seed_48/training_log_1d32ef1a-6c9c-42b2-8a59-62ddd2143fab.txt +0 -0
  18. logs_svd_gated/mode_13_param_gated_seed_49/config.json +25 -0
  19. logs_svd_gated/mode_13_param_gated_seed_49/training_log_1531a5c8-fb60-4f63-ad76-0f25f42b48db.txt +0 -0
  20. logs_svd_gated/mode_13_param_gated_seed_50/config.json +25 -0
  21. logs_svd_gated/mode_13_param_gated_seed_50/training_log_f5f8623b-17fe-4271-a358-8cb57ae238a1.txt +0 -0
  22. logs_svd_gated/mode_14_param_gated_seed_41/config.json +25 -0
  23. logs_svd_gated/mode_14_param_gated_seed_41/training_log_3a060110-ad46-4bb9-9bfc-220548766993.txt +0 -0
  24. logs_svd_gated/mode_14_param_gated_seed_42/config.json +25 -0
  25. logs_svd_gated/mode_14_param_gated_seed_42/training_log_3c8ef23e-8e99-4dfb-af73-28cde593d61e.txt +0 -0
  26. logs_svd_gated/mode_14_param_gated_seed_43/config.json +25 -0
  27. logs_svd_gated/mode_14_param_gated_seed_43/training_log_1c163800-35b0-4389-b3a7-5f103382de01.txt +0 -0
  28. logs_svd_gated/mode_14_param_gated_seed_44/config.json +25 -0
  29. logs_svd_gated/mode_14_param_gated_seed_44/training_log_b3f636bf-aaae-4f63-84ba-14c79a0fac04.txt +0 -0
  30. logs_svd_gated/mode_14_param_gated_seed_45/config.json +25 -0
  31. logs_svd_gated/mode_14_param_gated_seed_45/training_log_45ff64f4-c3fe-4a27-b43d-8b30681d5861.txt +0 -0
  32. logs_svd_gated/mode_14_param_gated_seed_46/config.json +25 -0
  33. logs_svd_gated/mode_14_param_gated_seed_46/training_log_f81cb117-3729-42b6-a4bf-001b4dc1d990.txt +0 -0
  34. logs_svd_gated/mode_14_param_gated_seed_47/config.json +25 -0
  35. logs_svd_gated/mode_14_param_gated_seed_47/training_log_7633ecfb-e90e-4d78-8c06-563f8c802dee.txt +0 -0
  36. logs_svd_gated/mode_14_param_gated_seed_48/config.json +25 -0
  37. logs_svd_gated/mode_14_param_gated_seed_48/training_log_add87a33-be2f-4e3e-afdf-d3bf661e7185.txt +0 -0
  38. logs_svd_gated/mode_14_param_gated_seed_49/config.json +25 -0
  39. logs_svd_gated/mode_14_param_gated_seed_49/training_log_a67ed87a-addb-45e9-9122-746d6f16e641.txt +0 -0
  40. logs_svd_gated/mode_14_param_gated_seed_50/config.json +25 -0
  41. logs_svd_gated/mode_14_param_gated_seed_50/training_log_20edc821-54ac-4e8f-8176-8387c86d21f5.txt +0 -0
  42. logs_svd_gated/mode_15_param_gated_seed_41/config.json +25 -0
  43. logs_svd_gated/mode_15_param_gated_seed_41/training_log_f146521e-11b7-47c0-93e1-af861941cb9b.txt +0 -0
  44. logs_svd_gated/mode_15_param_gated_seed_42/config.json +25 -0
  45. logs_svd_gated/mode_15_param_gated_seed_42/training_log_1501a628-3a92-4eec-9378-5faa95a74a96.txt +0 -0
  46. logs_svd_gated/mode_15_param_gated_seed_43/config.json +25 -0
  47. logs_svd_gated/mode_15_param_gated_seed_43/training_log_e0218743-5660-4687-92f9-454060288cb7.txt +0 -0
  48. logs_svd_gated/mode_15_param_gated_seed_44/config.json +25 -0
  49. logs_svd_gated/mode_15_param_gated_seed_44/training_log_4acb41e0-2540-49e6-8cc2-39b68527f0d1.txt +0 -0
  50. logs_svd_gated/mode_15_param_gated_seed_45/config.json +25 -0
logs_svd_gated/mode_13_param_gated_seed_41/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+     "cli_args": {
+         "unet": false,
+         "seed": 41,
+         "optimizer_mode": 13,
+         "model_parameterization": "gated",
+         "adam_lr": 0.05,
+         "muon_lr": 0.05,
+         "base_dir": "logs_svd_gated"
+     },
+     "hyperparameters": {
+         "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+         "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+         "val_tokens": 1966080,
+         "train_seq_len": 12288,
+         "val_seq_len": 65536,
+         "num_iterations": 10000,
+         "cooldown_frac": 0.4,
+         "vocab_size": 50257,
+         "val_loss_every": 200,
+         "save_checkpoint": false
+     },
+     "run_uuid_for_log": "53ddf4cc-e033-4acf-bc2d-5f9f5c822ce1",
+     "script_code_logged_at_start": true
+ }
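As a quick aside (not part of the commit): every run directory added here carries a config.json in the shape shown above, so the sweep can be inspected programmatically. A minimal sketch, assuming the repository root as the working directory:

import json
from pathlib import Path

cfg = json.loads(Path("logs_svd_gated/mode_13_param_gated_seed_41/config.json").read_text())
cli = cfg["cli_args"]
# Rebuild the run-folder name that the training script derives from its CLI args
run_folder = f"mode_{cli['optimizer_mode']}_param_{cli['model_parameterization']}_seed_{cli['seed']}"
print(run_folder)                                # mode_13_param_gated_seed_41
print(cfg["hyperparameters"]["num_iterations"])  # 10000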
logs_svd_gated/mode_13_param_gated_seed_41/training_log_53ddf4cc-e033-4acf-bc2d-5f9f5c822ce1.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_13_param_gated_seed_41/training_log_d270cd1a-a1f4-441e-a235-1836f2598c11.txt ADDED
@@ -0,0 +1,1760 @@
+ [2025-09-04 15:58:23] [Rank 0] PRINT: --- Script Start: Thu Sep 4 15:58:23 2025 ---
+ [2025-09-04 15:58:23] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=41, optimizer_mode=13, model_parameterization='gated', adam_lr=0.05, muon_lr=0.05, base_dir='logs_svd_gated')
+ [2025-09-04 15:58:23] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+ [2025-09-04 15:58:23] [Rank 0] PRINT: Using fixed seed: 41
+ [2025-09-04 15:58:23] [Rank 0] PRINT: Run directory: logs_svd_gated/mode_13_param_gated_seed_41
11
+ [2025-09-04 15:58:23] [Rank 0] import os
12
+ import sys
13
+ with open(sys.argv[0]) as f:
14
+ code = f.read() # read the code of this file ASAP, for logging
15
+ import uuid
16
+ import time
17
+ import copy
18
+ import glob
19
+ from dataclasses import dataclass, asdict
20
+ from functools import lru_cache
21
+ from pathlib import Path
22
+ import argparse # Keep argparse for --unet and potentially --optimizer_mode
23
+ import json
24
+ import random
25
+ import numpy as np
26
+
27
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
28
+ import torch
29
+ torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
30
+ from torch import Tensor, nn
31
+ import torch.nn.functional as F
32
+ import torch.distributed as dist
33
+ # use of FlexAttention contributed by @KoszarskyB
34
+ from torch.nn.attention.flex_attention import BlockMask, flex_attention
35
+ sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
36
+ from optimizers.MUON_new import Muon
37
+ from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
38
+
39
+ #from kn_util.utils import setup_debugpy
40
+ #torch._inductor.config.coordinate_descent_tuning = True
41
+
42
+ # -----------------------------------------------------------------------------
43
+
44
+ mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
45
+
46
+ # -----------------------------------------------------------------------------
47
+ # Seeding Function
48
+ def set_seed(seed):
49
+ random.seed(seed)
50
+ np.random.seed(seed)
51
+ torch.manual_seed(seed)
52
+ if torch.cuda.is_available():
53
+ torch.cuda.manual_seed_all(seed)
54
+ print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
55
+
56
+ # -----------------------------------------------------------------------------
57
+ # Our own simple Distributed Data Loader (KEEP AS IS)
58
+ def _load_data_shard(file: Path):
59
+ header = torch.from_file(str(file), False, 256, dtype=torch.int32)
60
+ assert header[0] == 20240520, "magic number mismatch in the data .bin file"
61
+ assert header[1] == 1, "unsupported version"
62
+ num_tokens = int(header[2])
63
+ with file.open("rb", buffering=0) as f:
64
+ tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
65
+ f.seek(256 * 4)
66
+ nbytes = f.readinto(tokens.numpy())
67
+ assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
68
+ return tokens
69
+
70
+ def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
71
+ files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
72
+ assert batch_size % world_size == 0
73
+ local_batch_size = batch_size // world_size
74
+ file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training
75
+ tokens, pos = _load_data_shard(next(file_iter)), 0
76
+ while True:
77
+ if pos + batch_size + 1 >= len(tokens):
78
+ tokens, pos = _load_data_shard(next(file_iter)), 0
79
+ buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
80
+ inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
81
+ targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
82
+ pos += batch_size
83
+ yield inputs, targets
84
+
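A reader's note, not part of the logged script: _load_data_shard above expects each .bin shard to start with a 256-entry int32 header (magic 20240520, version 1, token count) followed by the raw uint16 token ids. A minimal sketch that writes a toy shard in that layout (the file name and token values are invented for illustration):

import numpy as np

def write_toy_shard(path, tokens):
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520          # magic number checked by _load_data_shard
    header[1] = 1                 # version
    header[2] = len(tokens)       # token count, read back as header[2]
    with open(path, "wb") as f:
        f.write(header.tobytes())                               # 256 * 4 header bytes
        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())  # 2 bytes per token

write_toy_shard("toy_train_000.bin", np.arange(4096) % 50257)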
85
+ # ---- ADD: spectral metrics helper right after calculate_svd_entropy ----
86
+ def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10):
87
+ """
88
+ Returns dict with:
89
+ - entropy_norm: normalized SVD entropy (same normalization as your function)
90
+ - erank: effective rank = exp(Shannon entropy of p)
91
+ - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values)
92
+ - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2)
93
+ """
94
+ with torch.no_grad():
95
+ s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32))
96
+ s = s[s > 1e-9]
97
+ n = s.numel()
98
+ if n == 0:
99
+ return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf'))
100
+
101
+ s2 = s * s
102
+ S2_sum = float(torch.sum(s2))
103
+ if S2_sum == 0.0:
104
+ return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf'))
105
+
106
+ p = s2 / S2_sum # energy distribution
107
+ # Shannon entropy H (natural log)
108
+ H = float(torch.sum(torch.special.entr(p)))
109
+ entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy
110
+ erank = float(np.exp(H))
111
+
112
+ k = min(topk, n)
113
+ topk_energy = float(torch.topk(p, k).values.sum())
114
+
115
+ # eigenvalues = s^2, use quantiles on s^2
116
+ q25 = float(torch.quantile(s2, 0.25))
117
+ q75 = float(torch.quantile(s2, 0.75))
118
+ q75_q25 = (q75 / q25) if q25 > 0 else float('inf')
119
+
120
+ return dict(
121
+ entropy_norm=entropy_norm,
122
+ erank=erank,
123
+ topk_energy=topk_energy,
124
+ q75_q25=q75_q25,
125
+ )
126
+
127
+
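For intuition (an illustrative aside, not part of the logged script): all four metrics are derived from the energy distribution p_i = s_i^2 / sum_j s_j^2 of the singular values, so erank = exp(H) collapses toward the true rank of a low-rank matrix while topk_energy approaches 1 when a few directions dominate. A small sketch, assuming calculate_svd_metrics above is in scope:

import torch

gaussian = torch.randn(768, 768)                      # roughly full-rank
rank8    = torch.randn(768, 8) @ torch.randn(8, 768)  # rank 8 by construction

for name, m in [("gaussian", gaussian), ("rank8", rank8)]:
    mets = calculate_svd_metrics(m, topk=10)
    print(name, {k: round(v, 3) for k, v in mets.items()})
# Expect erank to be a large fraction of 768 for the Gaussian matrix,
# and erank close to 8 with top-10 energy close to 1.0 for the rank-8 one.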
128
+ # -----------------------------------------------------------------------------
129
+ # int main
130
+ parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
131
+ parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
132
+ parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
133
+ # --- MODIFICATION: Add optimizer_mode as a CLI argument ---
134
+ parser.add_argument("--optimizer_mode", type=int, default=0,
135
+ help="Defines how Muon is applied. "
136
+ "0: Muon(All Hidden Attn+MLP - original); "
137
+ "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
138
+ "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
139
+ "3: Muon(All Attn)/Adam(MLP); "
140
+ "4: Muon(MLP)/Adam(All Attn)"
141
+ "5: All Adam (No Muon, all applicable matrices to Adam)."
142
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)."
143
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)."
144
+ "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
145
+ "11: Muon(W_1)/Adam(O Attn, QK Attn)."
146
+ )
147
+ parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"])
148
+ parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices")
149
+ parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices")
150
+ parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs")
151
+ exp_args = parser.parse_args()
152
+ set_seed(exp_args.seed)
153
+
154
+ # --- MODIFICATION: Import correct GPT model based on --unet flag ---
155
+ if exp_args.unet:
156
+ print("Using U-net architecture")
157
+ from models.nano_GPT_unet import GPT
158
+ elif exp_args.model_parameterization == "qkvo":
159
+ print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
160
+ # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
161
+
162
+ from models.nano_GPT_qkvo import GPT
163
+
164
+ elif exp_args.model_parameterization == "norope":
165
+ print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w")
166
+ from models.nano_GPT_norope import GPT
167
+
168
+ elif exp_args.model_parameterization == "gated":
169
+ print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w")
170
+ from models.nano_GPT_gated import GPT
171
+
172
+ elif exp_args.model_parameterization == "whole":
173
+ print("Using original architecture")
174
+ from models.nano_GPT import GPT
175
+
176
+ @dataclass
177
+ class Hyperparameters:
178
+ # data
179
+
180
+ #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
181
+ #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
182
+ train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
183
+ val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
184
+ val_tokens = 1966080
185
+ #val_tokens = 10485760
186
+ train_seq_len = 12*1024
187
+ val_seq_len = 4*16*1024
188
+ #train_seq_len = 48*1024 # FlexAttention sequence length
189
+ #train_seq_len = 12*1024 # FlexAttention sequence length
190
+ #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
191
+
192
+ # optimization
193
+ num_iterations = 10000 #1770 # Original: 1770
194
+ cooldown_frac = 0.4
195
+ # architecture
196
+
197
+ vocab_size = 50257
198
+
199
+ # evaluation and logging
200
+ val_loss_every = 200 # Original: 125
201
+ save_checkpoint = False
202
+ args = Hyperparameters()
203
+
204
+ # DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
205
+ rank = int(os.environ.get("RANK", 0))
206
+ local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
207
+ world_size = int(os.environ.get("WORLD_SIZE", 1))
208
+
209
+ # print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
210
+
211
+ assert torch.cuda.is_available()
212
+ device = torch.device("cuda", local_rank) # Use local_rank for device
213
+ torch.cuda.set_device(device)
214
+
215
+ if not dist.is_initialized(): # Ensure DDP is initialized only once
216
+ dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
217
+ dist.barrier()
218
+ master_process = (rank == 0)
219
+
220
+ # Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
221
+ logfile = None
222
+ # --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
223
+ #log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
224
+ #if master_process:
225
+ # run_id = uuid.uuid4()
226
+ # os.makedirs(log_dir, exist_ok=True) # Create new log directory
227
+ # logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
228
+ # print(f"Logging to: {logfile}")
229
+
230
+ logfile = None
231
+ run_dir_path_str = None
232
+
233
+ base_log_dir = Path(exp_args.base_dir)
234
+
235
+ if master_process:
236
+ # Set seed again specifically for master process for operations like dir creation, config saving
237
+ set_seed(exp_args.seed)
238
+
239
+ # Construct folder name based on config and seed
240
+ run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
241
+ run_dir_path = base_log_dir / run_folder_name
242
+ run_dir_path.mkdir(parents=True, exist_ok=True)
243
+ run_dir_path_str = str(run_dir_path)
244
+
245
+ run_uuid = uuid.uuid4()
246
+ logfile = run_dir_path / f"training_log_{run_uuid}.txt"
247
+ print(f"Logging to: {logfile}")
248
+
249
+ # Save configuration
250
+ config_to_save = {
251
+ "cli_args": vars(exp_args),
252
+ "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
253
+ "run_uuid_for_log": str(run_uuid),
254
+ "script_code_logged_at_start": True
255
+ }
256
+ config_file_path = run_dir_path / "config.json"
257
+ with open(config_file_path, "w") as f:
258
+ json.dump(config_to_save, f, indent=4)
259
+ print(f"Saved configuration to: {config_file_path}")
260
+
261
+ def print0(s, console=False):
262
+ if master_process:
263
+ # Add timestamp and rank for better log readability
264
+ timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
265
+ log_message = f"[{timestamp}] [Rank {rank}] {s}"
266
+
267
+ # Print to console if requested or if it's a specific "PRINT:" message
268
+ if console or s.startswith("PRINT:"):
269
+ actual_s = s[6:] if s.startswith("PRINT:") else s
270
+ print(actual_s) # Print to stdout for master process
271
+
272
+ if logfile:
273
+ with open(logfile, "a") as f:
274
+ f.write(log_message + "\n")
275
+
278
+
279
+
280
+ print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
281
+ print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
282
+ print0(f"PRINT: Hyperparameters: {args}", console=True)
283
+ print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
284
+ if master_process:
285
+ print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
286
+ print0(code) # Log the code
287
+ # ... (other initial logs)
288
+
289
+ ########################################
290
+ # Construct model and optimizer #
291
+ ########################################
292
+ print0("PRINT: Constructing model...", console=True)
293
+ model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
294
+ max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
295
+ for m in model.modules():
296
+ if isinstance(m, nn.Embedding):
297
+ m.bfloat16()
298
+ print0("PRINT: Broadcasting model parameters...", console=True)
299
+ for param in model.parameters():
300
+ dist.broadcast(param.detach(), 0)
301
+ print0("PRINT: Model constructed and broadcasted.", console=True)
302
+
303
+ # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
304
+ if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope":
305
+ print0("PRINT: Collecting parameters for optimizers...", console=True)
306
+ head_params = [model.lm_head.weight]
307
+ embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
308
+
309
+ # Granular collection for attention and MLP parts
310
+ attn_q_params = []
311
+ attn_k_params = []
312
+ attn_v_params = []
313
+ attn_o_params = [] # W_O from c_proj
314
+ mlp_fc_params = []
315
+ mlp_proj_params = []
316
+
317
+ for block_module in model.blocks:
318
+ if block_module.attn is not None:
319
+ # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
320
+ if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
321
+ else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
322
+ if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
323
+ else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
324
+ if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
325
+ else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
326
+ attn_o_params.append(block_module.attn.c_proj.weight)
327
+ if block_module.mlp is not None:
328
+ mlp_fc_params.append(block_module.mlp.c_fc.weight)
329
+ mlp_proj_params.append(block_module.mlp.c_proj.weight)
330
+
331
+ # Combine into logical groups for experiments
332
+ attn_qk_group = attn_q_params + attn_k_params
333
+ attn_vo_group = attn_v_params + attn_o_params
334
+ all_attn_matrices = attn_qk_group + attn_vo_group
335
+ mlp_w1_group = mlp_fc_params
336
+ mlp_w2_group = mlp_proj_params
337
+ all_mlp_matrices = mlp_fc_params + mlp_proj_params
338
+
339
+ # Scalar parameters (all others not explicitly grouped as matrices)
340
+ matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
341
+ scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
342
+ for p_scalar in scalar_params: # Sanity check
343
+ if p_scalar.ndim >=2:
344
+ print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
345
+
346
+
347
+ # Determine parameter distribution based on optimizer_mode
348
+ muon_params_target_list = []
349
+ adam_matrix_target_list = [] # Matrices that Adam will handle specifically
350
+ adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
351
+
352
+ current_optimizer_mode = exp_args.optimizer_mode
353
+ print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
354
+
355
+ if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
356
+ print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
357
+ muon_params_target_list = all_attn_matrices + all_mlp_matrices
358
+ # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
359
+ elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
360
+ print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
361
+ muon_params_target_list = attn_qk_group
362
+ adam_matrix_target_list = attn_vo_group + all_mlp_matrices
363
+ elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
364
+ print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
365
+ muon_params_target_list = attn_vo_group
366
+ adam_matrix_target_list = attn_qk_group + all_mlp_matrices
367
+ elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
368
+ print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
369
+ muon_params_target_list = all_attn_matrices
370
+ adam_matrix_target_list = all_mlp_matrices
371
+ elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
372
+ print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
373
+ muon_params_target_list = all_mlp_matrices
374
+ adam_matrix_target_list = all_attn_matrices
375
+ elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
376
+ print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
377
+ muon_params_target_list = []
378
+ adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
379
+ elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
380
+ print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
381
+ muon_params_target_list = mlp_w2_group
382
+ adam_matrix_target_list = all_attn_matrices + mlp_w1_group
383
+ elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
384
+ print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
385
+ muon_params_target_list = attn_vo_group + all_mlp_matrices
386
+ adam_matrix_target_list = attn_qk_group
387
+ elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
388
+ print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
389
+ muon_params_target_list = attn_vo_group + mlp_w2_group
390
+ adam_matrix_target_list = attn_qk_group + mlp_w1_group
391
+ elif current_optimizer_mode == 9: # Muon on V Attn, MLP
392
+ print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
393
+ muon_params_target_list = attn_v_params + all_mlp_matrices
394
+ adam_matrix_target_list = attn_o_params + attn_qk_group
395
+ elif current_optimizer_mode == 10: # Muon on O Attn, MLP
396
+ print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
397
+ muon_params_target_list = attn_o_params + all_mlp_matrices
398
+ adam_matrix_target_list = attn_v_params + attn_qk_group
399
+ elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn
400
+ print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
401
+ muon_params_target_list = mlp_w1_group
402
+ adam_matrix_target_list = all_attn_matrices + mlp_w2_group
403
+ elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others
404
+ print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
405
+ muon_params_target_list = attn_vo_group + mlp_w1_group
406
+ adam_matrix_target_list = attn_qk_group + mlp_w2_group
407
+ elif current_optimizer_mode == 13:
408
+ print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
409
+ muon_params_target_list = attn_o_params + mlp_w2_group
410
+ adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
411
+ elif current_optimizer_mode == 14:
412
+ print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
413
+ muon_params_target_list = attn_o_params
414
+ adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
415
+ elif current_optimizer_mode == 15:
416
+ print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
417
+ muon_params_target_list = attn_v_params
418
+ adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
419
+ else:
420
+ raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
421
+
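For reference (a reader's summary of the dispatch above, not extra code in the script), the Muon/Adam split selected by modes 13-15, the modes this commit's logs_svd_gated runs use, is:

# Matrices updated by Muon vs. Adam; embeddings, lm_head and scalar
# parameters always go to Adam regardless of mode.
mode_partition = {
    13: {"muon": ["W_O (attn c_proj)", "W_2 (mlp c_proj)"],
         "adam": ["W_Q", "W_K", "W_V", "W_1 (mlp c_fc)"]},
    14: {"muon": ["W_O (attn c_proj)"],
         "adam": ["W_Q", "W_K", "W_V", "all MLP"]},
    15: {"muon": ["W_V"],
         "adam": ["W_Q", "W_K", "W_O", "all MLP"]},
}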
422
+ # Adam optimizer setup
423
+ adam_param_groups_config = [
424
+ dict(params=head_params, lr=adam_matrix_lr),
425
+ dict(params=embed_params, lr=adam_matrix_lr),
426
+ dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam
427
+ ]
428
+ # Add matrices specifically assigned to Adam for this experiment mode
429
+ if adam_matrix_target_list:
430
+ # Ensure adam_matrix_target_list is flat and contains Parameters
431
+ flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
432
+ if flat_adam_matrices: # Only add group if there are params
433
+ adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
434
+
435
+ # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
436
+ adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
437
+ optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)
438
+ optimizers = [optimizer1] # Start with Adam
439
+
440
+ # Muon optimizer setup
441
+ if muon_params_target_list:
442
+ # Ensure muon_params_target_list is flat, unique, and contains Parameters
443
+ flat_unique_muon_params = []
444
+ seen_muon_ids = set()
445
+ for sublist_or_p in muon_params_target_list:
446
+ for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
447
+ if p is not None and id(p) not in seen_muon_ids:
448
+ flat_unique_muon_params.append(p)
449
+ seen_muon_ids.add(id(p))
450
+
451
+ if flat_unique_muon_params: # Only create Muon if it has parameters
452
+ optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # Pass nesterov, ns_steps
453
+ optimizers.append(optimizer2)
454
+ else:
455
+ print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
456
+ optimizer2 = None # Explicitly set to None if not created
457
+ else:
458
+ print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
459
+ optimizer2 = None # Explicitly set to None
460
+
461
+ print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
462
+ if optimizer2:
463
+ print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
464
+ # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
465
+ elif exp_args.model_parameterization == "gated" :
466
+ print0("PRINT: Collecting parameters for optimizers...", console=True)
467
+ head_params = [model.lm_head.weight]
468
+ embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
469
+
470
+ # Granular collection for attention and MLP parts
471
+ attn_q_params = []
472
+ attn_k_params = []
473
+ attn_v_params = []
474
+ attn_o_params = [] # W_O from c_proj
475
+ mlp_fc_params = []
476
+ mlp_proj_params = []
477
+ mlp_up_params = []
478
+
479
+ for block_module in model.blocks:
480
+ if block_module.attn is not None:
481
+ # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
482
+ if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
483
+ else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
484
+ if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
485
+ else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
486
+ if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
487
+ else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
488
+ attn_o_params.append(block_module.attn.c_proj.weight)
489
+ if block_module.mlp is not None:
490
+ mlp_fc_params.append(block_module.mlp.c_fc.weight)
491
+ mlp_proj_params.append(block_module.mlp.c_proj.weight)
492
+ mlp_up_params.append(block_module.mlp.c_up.weight)
493
+
494
+ # Combine into logical groups for experiments
495
+ attn_qk_group = attn_q_params + attn_k_params
496
+ attn_vo_group = attn_v_params + attn_o_params
497
+ all_attn_matrices = attn_qk_group + attn_vo_group
498
+ mlp_w1_group = mlp_fc_params + mlp_up_params
499
+ mlp_w2_group = mlp_proj_params
500
+ all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
501
+
502
+ # Scalar parameters (all others not explicitly grouped as matrices)
503
+ matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
504
+ scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
505
+ for p_scalar in scalar_params: # Sanity check
506
+ if p_scalar.ndim >=2:
507
+ print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
508
+
509
+
510
+ # Determine parameter distribution based on optimizer_mode
511
+ muon_params_target_list = []
512
+ adam_matrix_target_list = [] # Matrices that Adam will handle specifically
513
+ adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
514
+
515
+ current_optimizer_mode = exp_args.optimizer_mode
516
+ print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
517
+
518
+ if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
519
+ print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
520
+ muon_params_target_list = all_attn_matrices + all_mlp_matrices
521
+ # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
522
+ elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
523
+ print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
524
+ muon_params_target_list = attn_qk_group
525
+ adam_matrix_target_list = attn_vo_group + all_mlp_matrices
526
+ elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
527
+ print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
528
+ muon_params_target_list = attn_vo_group
529
+ adam_matrix_target_list = attn_qk_group + all_mlp_matrices
530
+ elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
531
+ print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
532
+ muon_params_target_list = all_attn_matrices
533
+ adam_matrix_target_list = all_mlp_matrices
534
+ elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
535
+ print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
536
+ muon_params_target_list = all_mlp_matrices
537
+ adam_matrix_target_list = all_attn_matrices
538
+ elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
539
+ print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
540
+ muon_params_target_list = []
541
+ adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
542
+ elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
543
+ print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
544
+ muon_params_target_list = mlp_w2_group
545
+ adam_matrix_target_list = all_attn_matrices + mlp_w1_group
546
+ elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
547
+ print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
548
+ muon_params_target_list = attn_vo_group + all_mlp_matrices
549
+ adam_matrix_target_list = attn_qk_group
550
+ elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
551
+ print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
552
+ muon_params_target_list = attn_vo_group + mlp_w2_group
553
+ adam_matrix_target_list = attn_qk_group + mlp_w1_group
554
+ elif current_optimizer_mode == 9: # Muon on V Attn, MLP
555
+ print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
556
+ muon_params_target_list = attn_v_params + all_mlp_matrices
557
+ adam_matrix_target_list = attn_o_params + attn_qk_group
558
+ elif current_optimizer_mode == 10: # Muon on O Attn, MLP
559
+ print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
560
+ muon_params_target_list = attn_o_params + all_mlp_matrices
561
+ adam_matrix_target_list = attn_v_params + attn_qk_group
562
+ elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn
563
+ print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
564
+ muon_params_target_list = mlp_w1_group
565
+ adam_matrix_target_list = all_attn_matrices + mlp_w2_group
566
+ elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others
567
+ print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
568
+ muon_params_target_list = attn_vo_group + mlp_w1_group
569
+ adam_matrix_target_list = attn_qk_group + mlp_w2_group
570
+ else:
571
+ raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
572
+
573
+ # Adam optimizer setup
574
+ adam_param_groups_config = [
575
+ dict(params=head_params, lr=adam_matrix_lr),
576
+ dict(params=embed_params, lr=adam_matrix_lr),
577
+ dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam
578
+ ]
579
+ # Add matrices specifically assigned to Adam for this experiment mode
580
+ if adam_matrix_target_list:
581
+ # Ensure adam_matrix_target_list is flat and contains Parameters
582
+ flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
583
+ if flat_adam_matrices: # Only add group if there are params
584
+ adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
585
+
586
+ # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
587
+ adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
588
+ optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)
589
+ optimizers = [optimizer1] # Start with Adam
590
+
591
+ # Muon optimizer setup
592
+ if muon_params_target_list:
593
+ # Ensure muon_params_target_list is flat, unique, and contains Parameters
594
+ flat_unique_muon_params = []
595
+ seen_muon_ids = set()
596
+ for sublist_or_p in muon_params_target_list:
597
+ for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
598
+ if p is not None and id(p) not in seen_muon_ids:
599
+ flat_unique_muon_params.append(p)
600
+ seen_muon_ids.add(id(p))
601
+
602
+ if flat_unique_muon_params: # Only create Muon if it has parameters
603
+ optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0)
604
+ optimizers.append(optimizer2)
605
+ else:
606
+ print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
607
+ optimizer2 = None # Explicitly set to None if not created
608
+ else:
609
+ print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
610
+ optimizer2 = None # Explicitly set to None
611
+
612
+ print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
613
+ if optimizer2:
614
+ print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
615
+ # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
616
+ elif exp_args.model_parameterization == "whole":
617
+ hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
618
+ embed_params = [p for n, p in model.named_parameters() if "embed" in n]
619
+ scalar_params = [p for p in model.parameters() if p.ndim < 2]
620
+ head_params = [model.lm_head.weight]
621
+
622
+ # init the optimizer(s)
623
+ adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
624
+ # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
625
+ # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
626
+ optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
627
+ optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
628
+ optimizers = [optimizer1, optimizer2]
629
+
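Whichever branch above ran, a useful invariant is that Adam and Muon partition the trainable parameters cleanly. A sketch of such a check (illustrative only, not part of the logged script; it reuses optimizer1, optimizer2 and model from the code above):

adam_ids = {id(p) for g in optimizer1.param_groups for p in g["params"]}
muon_ids = {id(p) for g in optimizer2.param_groups for p in g["params"]} if optimizer2 is not None else set()
assert adam_ids.isdisjoint(muon_ids), "a parameter is assigned to both optimizers"
uncovered = [n for n, p in model.named_parameters() if id(p) not in adam_ids | muon_ids]
print("parameters not covered by any optimizer:", uncovered)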
630
+ for opt in optimizers:
631
+ for group in opt.param_groups:
632
+ group["initial_lr"] = group["lr"]
633
+
634
+ # learning rate schedule: stable then decay (KEEP AS IS, but check assert)
635
+ def get_lr(step: int):
636
+ x = step / args.num_iterations # progress in training
637
+ # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
638
+ # --- MODIFICATION: Adjust assert for LR schedule ---
639
+ if not (0 <= x <= 1): # Allow x=1 for the last step
640
+ x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
641
+ # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
642
+
643
+ if x < 1 - args.cooldown_frac:
644
+ return 1.0
645
+ else:
646
+ # Ensure cooldown_frac is not zero to avoid division by zero
647
+ w = (1 - x) / max(args.cooldown_frac, 1e-9)
648
+ return w * 1.0 + (1 - w) * 0.1
649
+
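With this run's settings (num_iterations=10000, cooldown_frac=0.4) the multiplier returned by get_lr stays at 1.0 for the first 60% of training and then decays linearly to 0.1. A quick check (illustrative only):

for s in (0, 3000, 6000, 8000, 10000):
    x = s / 10000
    w = (1 - x) / 0.4
    mult = 1.0 if x < 0.6 else w * 1.0 + (1 - w) * 0.1
    print(s, round(mult, 3))
# -> 0 1.0, 3000 1.0, 6000 1.0, 8000 0.55, 10000 0.1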
650
+ # attention window size schedule (KEEP AS IS)
651
+ def next_multiple_of_n(v: float | int, *, n: int):
652
+ return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
653
+ @lru_cache(1)
654
+ def get_window_size_blocks_helper(window_size: int):
655
+ return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
656
+ def get_window_size_blocks(step: int):
657
+ x = step / args.num_iterations # progress in training
658
+ # --- MODIFICATION: Adjust assert for window size schedule ---
659
+ if not (0 <= x <= 1):
660
+ x = min(max(x, 0.0), 1.0) # Clamp x
661
+
662
+ # Ensure window_size is at least 128
663
+ window_size = max(128, next_multiple_of_n(1728 * x, n=128))
664
+ return get_window_size_blocks_helper(window_size)
665
+
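The schedule above widens the sliding attention window in 128-token blocks as training progresses, from 128 tokens at step 0 to 1792 tokens (14 blocks) at step 10000. A quick evaluation, assuming next_multiple_of_n from the code above is in scope:

def window_tokens(step, num_iterations=10000):
    x = min(max(step / num_iterations, 0.0), 1.0)
    return max(128, next_multiple_of_n(1728 * x, n=128))

print([window_tokens(s) for s in (0, 2500, 5000, 7500, 10000)])
# -> [128, 512, 896, 1408, 1792]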
666
+ print0("PRINT: Compiling model with TorchInductor...", console=True)
667
+ # Use 'model' for compilation, not 'model_compiled' before it's defined
668
+ model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
669
+ print0("PRINT: Model compilation complete.", console=True)
670
+
671
+ ########################################
672
+ # Warmup kernels #
673
+ ########################################
674
+ print0("PRINT: Starting warmup...", console=True)
675
+ warmup_steps = 10
676
+ initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled
677
+ optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers])
678
+ for i in range(warmup_steps):
679
+ # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose
680
+ inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
681
+ loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled
682
+ loss.backward()
683
+ for param in model_compiled.parameters(): # Use model_compiled
684
+ if param.grad is not None:
685
+ dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
686
+ for opt in optimizers:
687
+ opt.step()
688
+ model_compiled.zero_grad(set_to_none=True) # Use model_compiled
689
+ model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled
690
+ for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
691
+ opt.load_state_dict(opt_state)
692
+ del initial_state
693
+ print0("PRINT: Warmup complete.", console=True)
694
+ torch.cuda.synchronize()
695
+
696
+
697
+ params_to_analyze = []
698
+
699
+ if exp_args.model_parameterization == "whole":
700
+ params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad]
701
+ elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated":
702
+ params_to_analyze = all_attn_matrices + all_mlp_matrices
703
+ matrix_groups_for_svd = {}
704
+ if master_process:
705
+ matrix_groups_for_svd = {
706
+ "attn_qk": attn_qk_group,
707
+ "attn_vo": attn_vo_group,
708
+ "mlp_w1": mlp_w1_group,
709
+ "mlp_w2": mlp_proj_params
710
+ }
711
+
712
+
713
+
714
+ ########################################
715
+ # Training and validation #
716
+ ########################################
717
+ print0("PRINT: Starting training...", console=True)
718
+ train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
719
+ training_time_ms = 0
720
+ torch.cuda.synchronize()
721
+ t0 = time.perf_counter()
722
+ train_steps = args.num_iterations
723
+
724
+ for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation)
725
+ last_step = (step == train_steps)
726
+
727
+ # --------------- VALIDATION SECTION -----------------
728
+ # Validate at step 0 (after warmup), at specified intervals, and at the very last step
729
+ if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
730
+ torch.cuda.synchronize()
731
+ # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0)
732
+ if step > 0 : # For step 0, t0 hasn't started a training segment yet
733
+ current_run_time = 1000 * (time.perf_counter() - t0)
734
+ training_time_ms += current_run_time
735
+
736
+ model_compiled.eval() # Use model_compiled
737
+ val_batch_size = world_size * args.val_seq_len
738
+ # Ensure val_tokens is divisible by val_batch_size, or handle remainder
739
+ if args.val_tokens % val_batch_size != 0:
740
+ print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
741
+ val_num_steps = args.val_tokens // val_batch_size
742
+
743
+ val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
744
+ val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device
745
+ actual_val_steps = 0
746
+ with torch.no_grad():
747
+ for val_i in range(val_num_steps):
748
+ try:
749
+ inputs, targets = next(val_loader)
750
+ loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled
751
+ val_loss_sum += loss_val
752
+ actual_val_steps += 1
753
+ except StopIteration:
754
+ print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
755
+ break # Stop if data runs out
756
+
757
+ if actual_val_steps > 0:
758
+ val_loss_avg = val_loss_sum / actual_val_steps
759
+ else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue)
760
+ val_loss_avg = torch.tensor(float('nan'), device=device)
761
+ print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
762
+
763
+ del val_loader # Clean up
764
+ dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss
765
+
766
+ svd_log_str = ""
767
+ if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd:
768
+ TOPK = 10
769
+ svd_results_by_category = {}
770
+
771
+ with torch.no_grad():
772
+ # per-category metrics (average over matrices in the group)
773
+ for name, group_params in matrix_groups_for_svd.items():
774
+ if not group_params:
775
+ continue
776
+ mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params]
777
+ if mets:
778
+ avg_entropy = float(np.mean([m['entropy_norm'] for m in mets]))
779
+ avg_erank = float(np.mean([m['erank'] for m in mets]))
780
+ avg_topkE = float(np.mean([m['topk_energy'] for m in mets]))
781
+ avg_qratio = float(np.mean([m['q75_q25'] for m in mets]))
782
+ svd_results_by_category[name] = dict(
783
+ entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio
784
+ )
785
+
786
+ # VO product as another category
787
+ vo_mets = []
788
+ num_layers = len(attn_v_params)
789
+ for i in range(num_layers):
790
+ w_v = attn_v_params[i]
791
+ w_o = attn_o_params[i]
792
+ w_ov_product = torch.matmul(w_o, w_v)
793
+ vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK))
794
+ if vo_mets:
795
+ svd_results_by_category['vo_prod'] = dict(
796
+ entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])),
797
+ erank=float(np.mean([m['erank'] for m in vo_mets])),
798
+ topkE=float(np.mean([m['topk_energy'] for m in vo_mets])),
799
+ q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])),
800
+ )
801
+
802
+ # format logging string (append metrics after entropy)
803
+ svd_log_parts = []
804
+ for name, vals in svd_results_by_category.items():
805
+ svd_log_parts.append(
806
+ f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}"
807
+ )
808
+ svd_log_str = " ".join(svd_log_parts)
809
+
810
+
811
+ # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative.
812
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
813
+ print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
814
+
815
+ model_compiled.train() # Switch back to train mode
816
+ torch.cuda.synchronize()
817
+ t0 = time.perf_counter() # Reset timer for the next training segment
818
+
819
+ if last_step:
820
+ if master_process and args.save_checkpoint:
821
+ if run_dir_path_str: # Ensure run_dir_path_str is set by master process
822
+ checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
823
+ checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir
824
+ checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
825
+ log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled
826
+ optimizers=[opt.state_dict() for opt in optimizers])
827
+ torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save
828
+ print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
829
+ else:
830
+ print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
831
+ break
832
+
833
+ # --------------- TRAINING SECTION -----------------
834
+ try:
835
+ inputs, targets = next(train_loader)
836
+ except StopIteration:
837
+ print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
838
+ break # End if data runs out
839
+
840
+ loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled
841
+ loss_train.backward()
842
+
843
+ for param in model_compiled.parameters(): # Use model_compiled
844
+ if param.grad is not None: # Check if grad exists
845
+ dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
846
+
847
+ current_lr_val = get_lr(step)
848
+ for opt in optimizers:
849
+ for group in opt.param_groups:
850
+ group["lr"] = group["initial_lr"] * current_lr_val
851
+
852
+ # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists ---
853
+ if optimizer2 is not None: # Check if Muon optimizer was created
854
+ for group in optimizer2.param_groups:
855
+ frac = min(step / 300, 1) # momentum warmup for muon
856
+ group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
857
+
858
+ for opt in optimizers:
859
+ opt.step()
860
+
861
+ model_compiled.zero_grad(set_to_none=True) # Use model_compiled
862
+
863
+ # Logging (less frequent for training steps)
864
+ if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val
865
+ # This time is for the current segment since last validation / t0 reset
866
+ current_segment_time_ms = 1000 * (time.perf_counter() - t0)
867
+ # approx_training_time_ms is the total cumulative time
868
+ approx_total_training_time_ms = training_time_ms + current_segment_time_ms
869
+
870
+ total_tokens_in_batch = args.train_seq_len * world_size
871
+ train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
872
+
873
+ print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too
874
+
875
+ print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
876
+ print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
877
+ f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
878
+
879
+ if dist.is_initialized():
880
+ dist.destroy_process_group()
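The validation prints above land in the training_log_*.txt files in the form "step:<n>/<total> val_loss:<x> svd_entropy: ...". A small sketch for pulling any (step, val_loss) pairs out of one of the logs added in this commit (illustrative only; the regex simply mirrors that print format):

import re
from pathlib import Path

log_path = Path("logs_svd_gated/mode_13_param_gated_seed_41/"
                "training_log_d270cd1a-a1f4-441e-a235-1836f2598c11.txt")
pat = re.compile(r"step:(\d+)/\d+ val_loss:([\d.]+)")
points = [(int(s), float(v)) for s, v in pat.findall(log_path.read_text())]
print(points[:3], "...", points[-1] if points else None)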
881
+ [2025-09-04 15:58:23] [Rank 0] import os
882
+ import sys
883
+ with open(sys.argv[0]) as f:
884
+ code = f.read() # read the code of this file ASAP, for logging
885
+ import uuid
886
+ import time
887
+ import copy
888
+ import glob
889
+ from dataclasses import dataclass, asdict
890
+ from functools import lru_cache
891
+ from pathlib import Path
892
+ import argparse # Keep argparse for --unet and potentially --optimizer_mode
893
+ import json
894
+ import random
895
+ import numpy as np
896
+
897
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
898
+ import torch
899
+ torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
900
+ from torch import Tensor, nn
901
+ import torch.nn.functional as F
902
+ import torch.distributed as dist
903
+ # use of FlexAttention contributed by @KoszarskyB
904
+ from torch.nn.attention.flex_attention import BlockMask, flex_attention
905
+ sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
906
+ from optimizers.MUON_new import Muon
907
+ from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
908
+
909
+ #from kn_util.utils import setup_debugpy
910
+ #torch._inductor.config.coordinate_descent_tuning = True
911
+
912
+ # -----------------------------------------------------------------------------
913
+
914
+ mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
915
+
916
+ # -----------------------------------------------------------------------------
917
+ # Seeding Function
918
+ def set_seed(seed):
919
+ random.seed(seed)
920
+ np.random.seed(seed)
921
+ torch.manual_seed(seed)
922
+ if torch.cuda.is_available():
923
+ torch.cuda.manual_seed_all(seed)
924
+ print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
925
+
926
+ # -----------------------------------------------------------------------------
927
+ # Our own simple Distributed Data Loader (KEEP AS IS)
928
+ def _load_data_shard(file: Path):
929
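+ # Shard layout: a 256-int32 header (magic 20240520, version, token count) followed by the tokens stored as uint16.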
+ header = torch.from_file(str(file), False, 256, dtype=torch.int32)
930
+ assert header[0] == 20240520, "magic number mismatch in the data .bin file"
931
+ assert header[1] == 1, "unsupported version"
932
+ num_tokens = int(header[2])
933
+ with file.open("rb", buffering=0) as f:
934
+ tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
935
+ f.seek(256 * 4)
936
+ nbytes = f.readinto(tokens.numpy())
937
+ assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
938
+ return tokens
939
+
940
+ def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
941
+ files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
942
+ assert batch_size % world_size == 0
943
+ local_batch_size = batch_size // world_size
944
+ file_iter = iter(files) # use itertools.cycle(files) instead if you want to do multi-epoch training
945
+ tokens, pos = _load_data_shard(next(file_iter)), 0
946
+ while True:
947
+ if pos + batch_size + 1 >= len(tokens):
948
+ tokens, pos = _load_data_shard(next(file_iter)), 0
949
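+ # Each rank reads its own disjoint local_batch_size slice of the global batch (offset by rank);
+ # pos then advances by the full global batch_size so slices never overlap. The extra +1 token gives the shifted targets.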
+ buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
950
+ inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
951
+ targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
952
+ pos += batch_size
953
+ yield inputs, targets
954
+
955
+ # ---- ADD: spectral metrics helper right after calculate_svd_entropy ----
956
+ def calculate_svd_metrics(matrix: torch.Tensor, *, topk: int = 10):
957
+ """
958
+ Returns dict with:
959
+ - entropy_norm: normalized SVD entropy (same normalization as your function)
960
+ - erank: effective rank = exp(Shannon entropy of p)
961
+ - topk_energy: sum of top-k p_i (energy fraction in the top-k singular values)
962
+ - q75_q25: ratio of 75th to 25th percentile of eigenvalues (sigma^2)
963
+ """
964
+ with torch.no_grad():
965
+ s = torch.linalg.svdvals(matrix.detach().to('cpu', torch.float32))
966
+ s = s[s > 1e-9]
967
+ n = s.numel()
968
+ if n == 0:
969
+ return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf'))
970
+
971
+ s2 = s * s
972
+ S2_sum = float(torch.sum(s2))
973
+ if S2_sum == 0.0:
974
+ return dict(entropy_norm=0.0, erank=0.0, topk_energy=0.0, q75_q25=float('inf'))
975
+
976
+ p = s2 / S2_sum # energy distribution
977
+ # Shannon entropy H (natural log)
978
+ H = float(torch.sum(torch.special.entr(p)))
979
+ entropy_norm = H / np.log(max(n, 2)) # same normalization as your SVD entropy
980
+ erank = float(np.exp(H))
981
+
982
+ k = min(topk, n)
983
+ topk_energy = float(torch.topk(p, k).values.sum())
984
+
985
+ # eigenvalues = s^2, use quantiles on s^2
986
+ q25 = float(torch.quantile(s2, 0.25))
987
+ q75 = float(torch.quantile(s2, 0.75))
988
+ q75_q25 = (q75 / q25) if q25 > 0 else float('inf')
989
+
990
+ return dict(
991
+ entropy_norm=entropy_norm,
992
+ erank=erank,
993
+ topk_energy=topk_energy,
994
+ q75_q25=q75_q25,
995
+ )
996
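+ # Minimal usage sketch (hypothetical 768x768 weight W):
+ #   m = calculate_svd_metrics(W, topk=10)
+ #   an isotropic spectrum gives erank near min(W.shape) and topk_energy near topk/min(W.shape);
+ #   a strongly low-rank spectrum gives a small erank and topk_energy close to 1.0.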
+
997
+
998
+ # -----------------------------------------------------------------------------
999
+ # int main
1000
+ parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
1001
+ parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
1002
+ parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
1003
+ # --- MODIFICATION: Add optimizer_mode as a CLI argument ---
1004
+ parser.add_argument("--optimizer_mode", type=int, default=0,
1005
+ help="Defines how Muon is applied. "
1006
+ "0: Muon(All Hidden Attn+MLP - original); "
1007
+ "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
1008
+ "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
1009
+ "3: Muon(All Attn)/Adam(MLP); "
1010
+ "4: Muon(MLP)/Adam(All Attn)"
1011
+ "5: All Adam (No Muon, all applicable matrices to Adam)."
1012
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)."
1013
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn)."
1014
+ "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
1015
+ "11: Muon(W_1)/Adam(O Attn, QK Attn)."
1016
+ )
1017
+ parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo", "norope", "gated"])
1018
+ parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices")
1019
+ parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices")
1020
+ parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs")
1021
+ exp_args = parser.parse_args()
1022
+ set_seed(exp_args.seed)
1023
+
1024
+ # --- MODIFICATION: Import correct GPT model based on --unet flag ---
1025
+ if exp_args.unet:
1026
+ print("Using U-net architecture")
1027
+ from models.nano_GPT_unet import GPT
1028
+ elif exp_args.model_parameterization == "qkvo":
1029
+ print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
1030
+ # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
1031
+
1032
+ from models.nano_GPT_qkvo import GPT
1033
+
1034
+ elif exp_args.model_parameterization == "norope":
1035
+ print("Using architecture (models.nano_GPT_norope) with CausalSelfAttention having q_w, k_w, v_w")
1036
+ from models.nano_GPT_norope import GPT
1037
+
1038
+ elif exp_args.model_parameterization == "gated":
1039
+ print("Using architecture (models.nano_GPT_gated) with CausalSelfAttention having q_w, k_w, v_w")
1040
+ from models.nano_GPT_gated import GPT
1041
+
1042
+ elif exp_args.model_parameterization == "whole":
1043
+ print("Using original architecture")
1044
+ from models.nano_GPT import GPT
1045
+
1046
+ @dataclass
1047
+ class Hyperparameters:
1048
+ # data
1049
+
1050
+ #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
1051
+ #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
1052
+ train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
1053
+ val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
1054
+ val_tokens = 1966080
1055
+ #val_tokens = 10485760
1056
+ train_seq_len = 12*1024
1057
+ val_seq_len = 4*16*1024
1058
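+ # i.e. 12*1024 = 12288 training tokens and 4*16*1024 = 65536 validation tokens per rank per forward pass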
+ #train_seq_len = 48*1024 # FlexAttention sequence length
1059
+ #train_seq_len = 12*1024 # FlexAttention sequence length
1060
+ #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
1061
+
1062
+ # optimization
1063
+ num_iterations = 10000 #1770 # Original: 1770
1064
+ cooldown_frac = 0.4
1065
+ # architecture
1066
+
1067
+ vocab_size = 50257
1068
+
1069
+ # evaluation and logging
1070
+ val_loss_every = 200 # Original: 125
1071
+ save_checkpoint = False
1072
+ args = Hyperparameters()
1073
+
1074
+ # DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
1075
+ rank = int(os.environ.get("RANK", 0))
1076
+ local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
1077
+ world_size = int(os.environ.get("WORLD_SIZE", 1))
1078
+
1079
+ # print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
1080
+
1081
+ assert torch.cuda.is_available()
1082
+ device = torch.device("cuda", local_rank) # Use local_rank for device
1083
+ torch.cuda.set_device(device)
1084
+
1085
+ if not dist.is_initialized(): # Ensure DDP is initialized only once
1086
+ dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
1087
+ dist.barrier()
1088
+ master_process = (rank == 0)
1089
+
1090
+ # Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
1091
+ logfile = None
1092
+ # --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
1093
+ #log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
1094
+ #if master_process:
1095
+ # run_id = uuid.uuid4()
1096
+ # os.makedirs(log_dir, exist_ok=True) # Create new log directory
1097
+ # logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
1098
+ # print(f"Logging to: {logfile}")
1099
+
1100
+ logfile = None
1101
+ run_dir_path_str = None
1102
+
1103
+ base_log_dir = Path(exp_args.base_dir)
1104
+
1105
+ if master_process:
1106
+ # Set seed again specifically for master process for operations like dir creation, config saving
1107
+ set_seed(exp_args.seed)
1108
+
1109
+ # Construct folder name based on config and seed
1110
+ run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
1111
+ run_dir_path = base_log_dir / run_folder_name
1112
+ run_dir_path.mkdir(parents=True, exist_ok=True)
1113
+ run_dir_path_str = str(run_dir_path)
1114
+
1115
+ run_uuid = uuid.uuid4()
1116
+ logfile = run_dir_path / f"training_log_{run_uuid}.txt"
1117
+ print(f"Logging to: {logfile}")
1118
+
1119
+ # Save configuration
1120
+ config_to_save = {
1121
+ "cli_args": vars(exp_args),
1122
+ "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
1123
+ "run_uuid_for_log": str(run_uuid),
1124
+ "script_code_logged_at_start": True
1125
+ }
1126
+ config_file_path = run_dir_path / "config.json"
1127
+ with open(config_file_path, "w") as f:
1128
+ json.dump(config_to_save, f, indent=4)
1129
+ print(f"Saved configuration to: {config_file_path}")
1130
+
1131
+ def print0(s, console=False):
1132
+ if master_process:
1133
+ # Add timestamp and rank for better log readability
1134
+ timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
1135
+ log_message = f"[{timestamp}] [Rank {rank}] {s}"
1136
+
1137
+ # Print to console if requested or if it's a specific "PRINT:" message
1138
+ if console or s.startswith("PRINT:"):
1139
+ actual_s = s[6:] if s.startswith("PRINT:") else s
1140
+ print(actual_s) # Print to stdout for master process
1141
+
1142
+ if logfile:
1143
+ with open(logfile, "a") as f:
1144
+ f.write(log_message + "\n")
1145
+
1146
+ with open(logfile, "a") as f:
1147
+ f.write(log_message + "\n")
1148
+
1149
+
1150
+ print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
1151
+ print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
1152
+ print0(f"PRINT: Hyperparameters: {args}", console=True)
1153
+ print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
1154
+ if master_process:
1155
+ print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
1156
+ print0(code) # Log the code
1157
+ # ... (other initial logs)
1158
+
1159
+ ########################################
1160
+ # Construct model and optimizer #
1161
+ ########################################
1162
+ print0("PRINT: Constructing model...", console=True)
1163
+ model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
1164
+ max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
1165
+ for m in model.modules():
1166
+ if isinstance(m, nn.Embedding):
1167
+ m.bfloat16()
1168
+ print0("PRINT: Broadcasting model parameters...", console=True)
1169
+ for param in model.parameters():
1170
+ dist.broadcast(param.detach(), 0)
1171
+ print0("PRINT: Model constructed and broadcasted.", console=True)
1172
+
1173
+ # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
1174
+ if exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "norope":
1175
+ print0("PRINT: Collecting parameters for optimizers...", console=True)
1176
+ head_params = [model.lm_head.weight]
1177
+ embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
1178
+
1179
+ # Granular collection for attention and MLP parts
1180
+ attn_q_params = []
1181
+ attn_k_params = []
1182
+ attn_v_params = []
1183
+ attn_o_params = [] # W_O from c_proj
1184
+ mlp_fc_params = []
1185
+ mlp_proj_params = []
1186
+
1187
+ for block_module in model.blocks:
1188
+ if block_module.attn is not None:
1189
+ # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
1190
+ if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
1191
+ else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
1192
+ if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
1193
+ else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
1194
+ if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
1195
+ else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
1196
+ attn_o_params.append(block_module.attn.c_proj.weight)
1197
+ if block_module.mlp is not None:
1198
+ mlp_fc_params.append(block_module.mlp.c_fc.weight)
1199
+ mlp_proj_params.append(block_module.mlp.c_proj.weight)
1200
+
1201
+ # Combine into logical groups for experiments
1202
+ attn_qk_group = attn_q_params + attn_k_params
1203
+ attn_vo_group = attn_v_params + attn_o_params
1204
+ all_attn_matrices = attn_qk_group + attn_vo_group
1205
+ mlp_w1_group = mlp_fc_params
1206
+ mlp_w2_group = mlp_proj_params
1207
+ all_mlp_matrices = mlp_fc_params + mlp_proj_params
1208
+
1209
+ # Scalar parameters (all others not explicitly grouped as matrices)
1210
+ matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
1211
+ scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
1212
+ for p_scalar in scalar_params: # Sanity check
1213
+ if p_scalar.ndim >=2:
1214
+ print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
1215
+
1216
+
1217
+ # Determine parameter distribution based on optimizer_mode
1218
+ muon_params_target_list = []
1219
+ adam_matrix_target_list = [] # Matrices that Adam will handle specifically
1220
+ adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
1221
+
1222
+ current_optimizer_mode = exp_args.optimizer_mode
1223
+ print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
1224
+
1225
+ if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
1226
+ print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
1227
+ muon_params_target_list = all_attn_matrices + all_mlp_matrices
1228
+ # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
1229
+ elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
1230
+ print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
1231
+ muon_params_target_list = attn_qk_group
1232
+ adam_matrix_target_list = attn_vo_group + all_mlp_matrices
1233
+ elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
1234
+ print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
1235
+ muon_params_target_list = attn_vo_group
1236
+ adam_matrix_target_list = attn_qk_group + all_mlp_matrices
1237
+ elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
1238
+ print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
1239
+ muon_params_target_list = all_attn_matrices
1240
+ adam_matrix_target_list = all_mlp_matrices
1241
+ elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
1242
+ print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
1243
+ muon_params_target_list = all_mlp_matrices
1244
+ adam_matrix_target_list = all_attn_matrices
1245
+ elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
1246
+ print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
1247
+ muon_params_target_list = []
1248
+ adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
1249
+ elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
1250
+ print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
1251
+ muon_params_target_list = mlp_w2_group
1252
+ adam_matrix_target_list = all_attn_matrices + mlp_w1_group
1253
+ elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
1254
+ print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
1255
+ muon_params_target_list = attn_vo_group + all_mlp_matrices
1256
+ adam_matrix_target_list = attn_qk_group
1257
+ elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
1258
+ print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
1259
+ muon_params_target_list = attn_vo_group + mlp_w2_group
1260
+ adam_matrix_target_list = attn_qk_group + mlp_w1_group
1261
+ elif current_optimizer_mode == 9: # Muon on V Attn, MLP
1262
+ print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
1263
+ muon_params_target_list = attn_v_params + all_mlp_matrices
1264
+ adam_matrix_target_list = attn_o_params + attn_qk_group
1265
+ elif current_optimizer_mode == 10: # Muon on O Attn, MLP
1266
+ print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
1267
+ muon_params_target_list = attn_o_params + all_mlp_matrices
1268
+ adam_matrix_target_list = attn_v_params + attn_qk_group
1269
+ elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn
1270
+ print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
1271
+ muon_params_target_list = mlp_w1_group
1272
+ adam_matrix_target_list = all_attn_matrices + mlp_w2_group
1273
+ elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others
1274
+ print0(f"PRINT: Mode 12: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
1275
+ muon_params_target_list = attn_vo_group + mlp_w1_group
1276
+ adam_matrix_target_list = attn_qk_group + mlp_w2_group
1277
+ elif current_optimizer_mode == 13:
1278
+ print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
1279
+ muon_params_target_list = attn_o_params + mlp_w2_group
1280
+ adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
1281
+ elif current_optimizer_mode == 14:
1282
+ print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
1283
+ muon_params_target_list = attn_o_params
1284
+ adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
1285
+ elif current_optimizer_mode == 15:
1286
+ print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
1287
+ muon_params_target_list = attn_v_params
1288
+ adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
1289
+ else:
1290
+ raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
1291
+
1292
+ # Adam optimizer setup
1293
+ adam_param_groups_config = [
1294
+ dict(params=head_params, lr=adam_matrix_lr),
1295
+ dict(params=embed_params, lr=adam_matrix_lr),
1296
+ dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam
1297
+ ]
1298
+ # Add matrices specifically assigned to Adam for this experiment mode
1299
+ if adam_matrix_target_list:
1300
+ # Ensure adam_matrix_target_list is flat and contains Parameters
1301
+ flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
1302
+ if flat_adam_matrices: # Only add group if there are params
1303
+ adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
1304
+
1305
+ # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
1306
+ adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
1307
+ optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)
1308
+ optimizers = [optimizer1] # Start with Adam
1309
+
1310
+ # Muon optimizer setup
1311
+ if muon_params_target_list:
1312
+ # Ensure muon_params_target_list is flat, unique, and contains Parameters
1313
+ flat_unique_muon_params = []
1314
+ seen_muon_ids = set()
1315
+ for sublist_or_p in muon_params_target_list:
1316
+ for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
1317
+ if p is not None and id(p) not in seen_muon_ids:
1318
+ flat_unique_muon_params.append(p)
1319
+ seen_muon_ids.add(id(p))
1320
+
1321
+ if flat_unique_muon_params: # Only create Muon if it has parameters
1322
+ optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0) # other Muon args (e.g. nesterov, ns_steps) left at their defaults
1323
+ optimizers.append(optimizer2)
1324
+ else:
1325
+ print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
1326
+ optimizer2 = None # Explicitly set to None if not created
1327
+ else:
1328
+ print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
1329
+ optimizer2 = None # Explicitly set to None
1330
+
1331
+ print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
1332
+ if optimizer2:
1333
+ print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
1334
+ # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
1335
+ elif exp_args.model_parameterization == "gated" :
1336
+ print0("PRINT: Collecting parameters for optimizers...", console=True)
1337
+ head_params = [model.lm_head.weight]
1338
+ embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
1339
+
1340
+ # Granular collection for attention and MLP parts
1341
+ attn_q_params = []
1342
+ attn_k_params = []
1343
+ attn_v_params = []
1344
+ attn_o_params = [] # W_O from c_proj
1345
+ mlp_fc_params = []
1346
+ mlp_proj_params = []
1347
+ mlp_up_params = []
1348
+
1349
+ for block_module in model.blocks:
1350
+ if block_module.attn is not None:
1351
+ # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
1352
+ if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
1353
+ else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
1354
+ if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
1355
+ else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
1356
+ if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
1357
+ else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
1358
+ attn_o_params.append(block_module.attn.c_proj.weight)
1359
+ if block_module.mlp is not None:
1360
+ mlp_fc_params.append(block_module.mlp.c_fc.weight)
1361
+ mlp_proj_params.append(block_module.mlp.c_proj.weight)
1362
+ mlp_up_params.append(block_module.mlp.c_up.weight)
1363
+
1364
+ # Combine into logical groups for experiments
1365
+ attn_qk_group = attn_q_params + attn_k_params
1366
+ attn_vo_group = attn_v_params + attn_o_params
1367
+ all_attn_matrices = attn_qk_group + attn_vo_group
1368
+ mlp_w1_group = mlp_fc_params + mlp_up_params
1369
+ mlp_w2_group = mlp_proj_params
1370
+ all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
1371
+
1372
+ # Scalar parameters (all others not explicitly grouped as matrices)
1373
+ matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
1374
+ scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
1375
+ for p_scalar in scalar_params: # Sanity check
1376
+ if p_scalar.ndim >=2:
1377
+ print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
1378
+
1379
+
1380
+ # Determine parameter distribution based on optimizer_mode
1381
+ muon_params_target_list = []
1382
+ adam_matrix_target_list = [] # Matrices that Adam will handle specifically
1383
+ adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
1384
+
1385
+ current_optimizer_mode = exp_args.optimizer_mode
1386
+ print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
1387
+
1388
+ if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
1389
+ print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
1390
+ muon_params_target_list = all_attn_matrices + all_mlp_matrices
1391
+ # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
1392
+ elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
1393
+ print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
1394
+ muon_params_target_list = attn_qk_group
1395
+ adam_matrix_target_list = attn_vo_group + all_mlp_matrices
1396
+ elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
1397
+ print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
1398
+ muon_params_target_list = attn_vo_group
1399
+ adam_matrix_target_list = attn_qk_group + all_mlp_matrices
1400
+ elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
1401
+ print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
1402
+ muon_params_target_list = all_attn_matrices
1403
+ adam_matrix_target_list = all_mlp_matrices
1404
+ elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
1405
+ print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
1406
+ muon_params_target_list = all_mlp_matrices
1407
+ adam_matrix_target_list = all_attn_matrices
1408
+ elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
1409
+ print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
1410
+ muon_params_target_list = []
1411
+ adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
1412
+ elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
1413
+ print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
1414
+ muon_params_target_list = mlp_w2_group
1415
+ adam_matrix_target_list = all_attn_matrices + mlp_w1_group
1416
+ elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
1417
+ print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
1418
+ muon_params_target_list = attn_vo_group + all_mlp_matrices
1419
+ adam_matrix_target_list = attn_qk_group
1420
+ elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
1421
+ print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
1422
+ muon_params_target_list = attn_vo_group + mlp_w2_group
1423
+ adam_matrix_target_list = attn_qk_group + mlp_w1_group
1424
+ elif current_optimizer_mode == 9: # Muon on V Attn, MLP
1425
+ print0(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
1426
+ muon_params_target_list = attn_v_params + all_mlp_matrices
1427
+ adam_matrix_target_list = attn_o_params + attn_qk_group
1428
+ elif current_optimizer_mode == 10: # Muon on O Attn, MLP
1429
+ print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
1430
+ muon_params_target_list = attn_o_params + all_mlp_matrices
1431
+ adam_matrix_target_list = attn_v_params + attn_qk_group
1432
+ elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn
1433
+ print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
1434
+ muon_params_target_list = mlp_w1_group
1435
+ adam_matrix_target_list = all_attn_matrices + mlp_w2_group
1436
+ elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others
1437
+ print0(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
1438
+ muon_params_target_list = attn_vo_group + mlp_w1_group
1439
+ adam_matrix_target_list = attn_qk_group + mlp_w2_group
1440
+ else:
1441
+ raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
1442
+
1443
+ # Adam optimizer setup
1444
+ adam_param_groups_config = [
1445
+ dict(params=head_params, lr=adam_matrix_lr),
1446
+ dict(params=embed_params, lr=adam_matrix_lr),
1447
+ dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam
1448
+ ]
1449
+ # Add matrices specifically assigned to Adam for this experiment mode
1450
+ if adam_matrix_target_list:
1451
+ # Ensure adam_matrix_target_list is flat and contains Parameters
1452
+ flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
1453
+ if flat_adam_matrices: # Only add group if there are params
1454
+ adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
1455
+
1456
+ # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
1457
+ adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
1458
+ optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)
1459
+ optimizers = [optimizer1] # Start with Adam
1460
+
1461
+ # Muon optimizer setup
1462
+ if muon_params_target_list:
1463
+ # Ensure muon_params_target_list is flat, unique, and contains Parameters
1464
+ flat_unique_muon_params = []
1465
+ seen_muon_ids = set()
1466
+ for sublist_or_p in muon_params_target_list:
1467
+ for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
1468
+ if p is not None and id(p) not in seen_muon_ids:
1469
+ flat_unique_muon_params.append(p)
1470
+ seen_muon_ids.add(id(p))
1471
+
1472
+ if flat_unique_muon_params: # Only create Muon if it has parameters
1473
+ optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, weight_decay=0.0)
1474
+ optimizers.append(optimizer2)
1475
+ else:
1476
+ print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
1477
+ optimizer2 = None # Explicitly set to None if not created
1478
+ else:
1479
+ print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
1480
+ optimizer2 = None # Explicitly set to None
1481
+
1482
+ print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
1483
+ if optimizer2:
1484
+ print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
1485
+ # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
1486
+ elif exp_args.model_parameterization == "whole":
1487
+ hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
1488
+ embed_params = [p for n, p in model.named_parameters() if "embed" in n]
1489
+ scalar_params = [p for p in model.parameters() if p.ndim < 2]
1490
+ head_params = [model.lm_head.weight]
1491
+
1492
+ # init the optimizer(s)
1493
+ adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
1494
+ # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
1495
+ # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
1496
+ optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
1497
+ optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
1498
+ optimizers = [optimizer1, optimizer2]
1499
+
1500
+ for opt in optimizers:
1501
+ for group in opt.param_groups:
1502
+ group["initial_lr"] = group["lr"]
1503
+
1504
+ # learning rate schedule: stable then decay (KEEP AS IS, but check assert)
1505
+ def get_lr(step: int):
1506
+ x = step / args.num_iterations # progress in training
1507
+ # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
1508
+ # --- MODIFICATION: Adjust assert for LR schedule ---
1509
+ if not (0 <= x <= 1): # Allow x=1 for the last step
1510
+ x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
1511
+ # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
1512
+
1513
+ if x < 1 - args.cooldown_frac:
1514
+ return 1.0
1515
+ else:
1516
+ # Ensure cooldown_frac is not zero to avoid division by zero
1517
+ w = (1 - x) / max(args.cooldown_frac, 1e-9)
1518
+ return w * 1.0 + (1 - w) * 0.1
1519
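+ # Schedule shape: the LR multiplier stays at 1.0 for the first (1 - cooldown_frac) of training, then decays linearly to 0.1 at the final step (with cooldown_frac=0.4 and 10000 iterations: flat until step 6000, then linear decay).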
+
1520
+ # attention window size schedule (KEEP AS IS)
1521
+ def next_multiple_of_n(v: float | int, *, n: int):
1522
+ return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
1523
+ @lru_cache(1)
1524
+ def get_window_size_blocks_helper(window_size: int):
1525
+ return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
1526
+ def get_window_size_blocks(step: int):
1527
+ x = step / args.num_iterations # progress in training
1528
+ # --- MODIFICATION: Adjust assert for window size schedule ---
1529
+ if not (0 <= x <= 1):
1530
+ x = min(max(x, 0.0), 1.0) # Clamp x
1531
+
1532
+ # Ensure window_size is at least 128
1533
+ window_size = max(128, next_multiple_of_n(1728 * x, n=128))
1534
+ return get_window_size_blocks_helper(window_size)
1535
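+ # The attention window grows roughly linearly with training progress, from 128 tokens (1 block of 128) at step 0 to 1792 tokens (14 blocks) at the end of training.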
+
1536
+ print0("PRINT: Compiling model with TorchInductor...", console=True)
1537
+ # Use 'model' for compilation, not 'model_compiled' before it's defined
1538
+ model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
1539
+ print0("PRINT: Model compilation complete.", console=True)
1540
+
1541
+ ########################################
1542
+ # Warmup kernels #
1543
+ ########################################
1544
+ print0("PRINT: Starting warmup...", console=True)
1545
+ warmup_steps = 10
1546
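+ # A few throwaway steps on random tokens trigger torch.compile autotuning and CUDA allocator growth; the saved initial_state is restored afterwards so warmup does not affect the real run.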
+ initial_state = dict(model=copy.deepcopy(model_compiled.state_dict()), # Use model_compiled
1547
+ optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers])
1548
+ for i in range(warmup_steps):
1549
+ # print0(f"Warmup step {i+1}/{warmup_steps}", console=False) # Less verbose
1550
+ inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
1551
+ loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) # Use model_compiled
1552
+ loss.backward()
1553
+ for param in model_compiled.parameters(): # Use model_compiled
1554
+ if param.grad is not None:
1555
+ dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
1556
+ for opt in optimizers:
1557
+ opt.step()
1558
+ model_compiled.zero_grad(set_to_none=True) # Use model_compiled
1559
+ model_compiled.load_state_dict(initial_state["model"]) # Use model_compiled
1560
+ for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
1561
+ opt.load_state_dict(opt_state)
1562
+ del initial_state
1563
+ print0("PRINT: Warmup complete.", console=True)
1564
+ torch.cuda.synchronize()
1565
+
1566
+
1567
+ params_to_analyze = []
1568
+
1569
+ if exp_args.model_parameterization == "whole":
1570
+ params_to_analyze = [p for p in hidden_matrix_params if p.requires_grad]
1571
+ elif exp_args.model_parameterization == "qkvo" or exp_args.model_parameterization == "gated":
1572
+ params_to_analyze = all_attn_matrices + all_mlp_matrices
1573
+ matrix_groups_for_svd = {}
1574
+ if master_process:
1575
+ matrix_groups_for_svd = {
1576
+ "attn_qk": attn_qk_group,
1577
+ "attn_vo": attn_vo_group,
1578
+ "mlp_w1": mlp_w1_group,
1579
+ "mlp_w2": mlp_proj_params
1580
+ }
1581
+
1582
+
1583
+
1584
+ ########################################
1585
+ # Training and validation #
1586
+ ########################################
1587
+ print0("PRINT: Starting training...", console=True)
1588
+ train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
1589
+ training_time_ms = 0
1590
+ torch.cuda.synchronize()
1591
+ t0 = time.perf_counter()
1592
+ train_steps = args.num_iterations
1593
+
1594
+ for step in range(train_steps + 1): # Loop up to num_iterations (inclusive for final validation)
1595
+ last_step = (step == train_steps)
1596
+
1597
+ # --------------- VALIDATION SECTION -----------------
1598
+ # Validate at step 0 (after warmup), at specified intervals, and at the very last step
1599
+ if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
1600
+ torch.cuda.synchronize()
1601
+ # Add time from previous segment only if t0 was set (i.e., not the first validation at step 0)
1602
+ if step > 0 : # For step 0, t0 hasn't started a training segment yet
1603
+ current_run_time = 1000 * (time.perf_counter() - t0)
1604
+ training_time_ms += current_run_time
1605
+
1606
+ model_compiled.eval() # Use model_compiled
1607
+ val_batch_size = world_size * args.val_seq_len
1608
+ # Ensure val_tokens is divisible by val_batch_size, or handle remainder
1609
+ if args.val_tokens % val_batch_size != 0:
1610
+ print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
1611
+ val_num_steps = args.val_tokens // val_batch_size
1612
+
1613
+ val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
1614
+ val_loss_sum = torch.zeros(1, device=device) # Accumulate loss on device
1615
+ actual_val_steps = 0
1616
+ with torch.no_grad():
1617
+ for val_i in range(val_num_steps):
1618
+ try:
1619
+ inputs, targets = next(val_loader)
1620
+ loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled
1621
+ val_loss_sum += loss_val
1622
+ actual_val_steps += 1
1623
+ except StopIteration:
1624
+ print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
1625
+ break # Stop if data runs out
1626
+
1627
+ if actual_val_steps > 0:
1628
+ val_loss_avg = val_loss_sum / actual_val_steps
1629
+ else: # Handle case where no validation steps were run (e.g., val_tokens too small or data loader issue)
1630
+ val_loss_avg = torch.tensor(float('nan'), device=device)
1631
+ print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
1632
+
1633
+ del val_loader # Clean up
1634
+ dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) # Reduce average loss
1635
+
1636
+ svd_log_str = ""
1637
+ if master_process and 'matrix_groups_for_svd' in locals() and matrix_groups_for_svd:
1638
+ TOPK = 10
1639
+ svd_results_by_category = {}
1640
+
1641
+ with torch.no_grad():
1642
+ # per-category metrics (average over matrices in the group)
1643
+ for name, group_params in matrix_groups_for_svd.items():
1644
+ if not group_params:
1645
+ continue
1646
+ mets = [calculate_svd_metrics(p, topk=TOPK) for p in group_params]
1647
+ if mets:
1648
+ avg_entropy = float(np.mean([m['entropy_norm'] for m in mets]))
1649
+ avg_erank = float(np.mean([m['erank'] for m in mets]))
1650
+ avg_topkE = float(np.mean([m['topk_energy'] for m in mets]))
1651
+ avg_qratio = float(np.mean([m['q75_q25'] for m in mets]))
1652
+ svd_results_by_category[name] = dict(
1653
+ entropy=avg_entropy, erank=avg_erank, topkE=avg_topkE, q75_q25=avg_qratio
1654
+ )
1655
+
1656
+ # VO product as another category
1657
+ vo_mets = []
1658
+ num_layers = len(attn_v_params)
1659
+ for i in range(num_layers):
1660
+ w_v = attn_v_params[i]
1661
+ w_o = attn_o_params[i]
1662
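+ # Treat the per-layer OV circuit W_O @ W_V as a single linear map; its spectrum reflects the combined value-to-output transformation rather than either factor alone.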
+ w_ov_product = torch.matmul(w_o, w_v)
1663
+ vo_mets.append(calculate_svd_metrics(w_ov_product, topk=TOPK))
1664
+ if vo_mets:
1665
+ svd_results_by_category['vo_prod'] = dict(
1666
+ entropy=float(np.mean([m['entropy_norm'] for m in vo_mets])),
1667
+ erank=float(np.mean([m['erank'] for m in vo_mets])),
1668
+ topkE=float(np.mean([m['topk_energy'] for m in vo_mets])),
1669
+ q75_q25=float(np.mean([m['q75_q25'] for m in vo_mets])),
1670
+ )
1671
+
1672
+ # format logging string (append metrics after entropy)
1673
+ svd_log_parts = []
1674
+ for name, vals in svd_results_by_category.items():
1675
+ svd_log_parts.append(
1676
+ f"{name}:H={vals['entropy']:.4f},top{TOPK}E={vals['topkE']:.2f},eRank={vals['erank']:.1f},q75/q25={vals['q75_q25']:.2f}"
1677
+ )
1678
+ svd_log_str = " ".join(svd_log_parts)
1679
+
1680
+
1681
+ # For step 0, training_time_ms is 0. For subsequent steps, it's cumulative.
1682
+ avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
1683
+ print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} svd_entropy: {svd_log_str} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
1684
+
1685
+ model_compiled.train() # Switch back to train mode
1686
+ torch.cuda.synchronize()
1687
+ t0 = time.perf_counter() # Reset timer for the next training segment
1688
+
1689
+ if last_step:
1690
+ if master_process and args.save_checkpoint:
1691
+ if run_dir_path_str: # Ensure run_dir_path_str is set by master process
1692
+ checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
1693
+ checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) # Create checkpoints subdir
1694
+ checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
1695
+ log_checkpoint = dict(step=step, code=code, model=model_compiled.state_dict(), # Use model_compiled
1696
+ optimizers=[opt.state_dict() for opt in optimizers])
1697
+ torch.save(log_checkpoint, str(checkpoint_path)) # Convert Path to str for torch.save
1698
+ print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
1699
+ else:
1700
+ print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
1701
+ break
1702
+
1703
+ # --------------- TRAINING SECTION -----------------
1704
+ try:
1705
+ inputs, targets = next(train_loader)
1706
+ except StopIteration:
1707
+ print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
1708
+ break # End if data runs out
1709
+
1710
+ loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) # Use model_compiled
1711
+ loss_train.backward()
1712
+
1713
+ for param in model_compiled.parameters(): # Use model_compiled
1714
+ if param.grad is not None: # Check if grad exists
1715
+ dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
1716
+
1717
+ current_lr_val = get_lr(step)
1718
+ for opt in optimizers:
1719
+ for group in opt.param_groups:
1720
+ group["lr"] = group["initial_lr"] * current_lr_val
1721
+
1722
+ # --- MODIFICATION: Muon momentum warmup only if optimizer2 (Muon) exists ---
1723
+ if optimizer2 is not None: # Check if Muon optimizer was created
1724
+ for group in optimizer2.param_groups:
1725
+ frac = min(step / 300, 1) # momentum warmup for muon
1726
+ group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
1727
+
1728
+ for opt in optimizers:
1729
+ opt.step()
1730
+
1731
+ model_compiled.zero_grad(set_to_none=True) # Use model_compiled
1732
+
1733
+ # Logging (less frequent for training steps)
1734
+ if step > 0 and (step % 20 == 0 or step == train_steps -1) : # Avoid logging at step 0 before first val
1735
+ # This time is for the current segment since last validation / t0 reset
1736
+ current_segment_time_ms = 1000 * (time.perf_counter() - t0)
1737
+ # approx_training_time_ms is the total cumulative time
1738
+ approx_total_training_time_ms = training_time_ms + current_segment_time_ms
1739
+
1740
+ total_tokens_in_batch = args.train_seq_len * world_size
1741
+ train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
1742
+
1743
+ print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) # Log to console too
1744
+
1745
+ print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
1746
+ print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
1747
+ f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
1748
+
1749
+ if dist.is_initialized():
1750
+ dist.destroy_process_group()
1751
+ [2025-09-04 15:58:23] [Rank 0] PRINT: Constructing model...
1752
+ [2025-09-04 15:58:25] [Rank 0] PRINT: Broadcasting model parameters...
1754
+ [2025-09-04 15:58:25] [Rank 0] PRINT: Model constructed and broadcasted.
1756
+ [2025-09-04 15:58:25] [Rank 0] PRINT: Collecting parameters for optimizers...
1758
+ [2025-09-04 15:58:25] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 13
1760
logs_svd_gated/mode_13_param_gated_seed_42/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 42,
5
+ "optimizer_mode": 13,
6
+ "model_parameterization": "gated",
7
+ "adam_lr": 0.05,
8
+ "muon_lr": 0.05,
9
+ "base_dir": "logs_svd_gated"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 1966080,
15
+ "train_seq_len": 12288,
16
+ "val_seq_len": 65536,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 200,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "29ca794e-db48-4228-89b9-294e22f93633",
24
+ "script_code_logged_at_start": true
25
+ }
logs_svd_gated/mode_13_param_gated_seed_42/training_log_29ca794e-db48-4228-89b9-294e22f93633.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_13_param_gated_seed_43/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 43,
5
+ "optimizer_mode": 13,
6
+ "model_parameterization": "gated",
7
+ "adam_lr": 0.05,
8
+ "muon_lr": 0.05,
9
+ "base_dir": "logs_svd_gated"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 1966080,
15
+ "train_seq_len": 12288,
16
+ "val_seq_len": 65536,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 200,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "9ef1b43c-d8df-464d-9246-7f66cf8bbaee",
24
+ "script_code_logged_at_start": true
25
+ }
logs_svd_gated/mode_13_param_gated_seed_43/training_log_9ef1b43c-d8df-464d-9246-7f66cf8bbaee.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_13_param_gated_seed_44/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 44,
5
+ "optimizer_mode": 13,
6
+ "model_parameterization": "gated",
7
+ "adam_lr": 0.05,
8
+ "muon_lr": 0.05,
9
+ "base_dir": "logs_svd_gated"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 1966080,
15
+ "train_seq_len": 12288,
16
+ "val_seq_len": 65536,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 200,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "46d4e9f2-2b76-454e-bfe1-cd91263cd3ea",
24
+ "script_code_logged_at_start": true
25
+ }
logs_svd_gated/mode_13_param_gated_seed_44/training_log_46d4e9f2-2b76-454e-bfe1-cd91263cd3ea.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_13_param_gated_seed_45/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 45,
5
+ "optimizer_mode": 13,
6
+ "model_parameterization": "gated",
7
+ "adam_lr": 0.05,
8
+ "muon_lr": 0.05,
9
+ "base_dir": "logs_svd_gated"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 1966080,
15
+ "train_seq_len": 12288,
16
+ "val_seq_len": 65536,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 200,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "78f20870-5eda-4682-aced-8cedd91a0415",
24
+ "script_code_logged_at_start": true
25
+ }
logs_svd_gated/mode_13_param_gated_seed_45/training_log_78f20870-5eda-4682-aced-8cedd91a0415.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_13_param_gated_seed_46/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 46,
5
+ "optimizer_mode": 13,
6
+ "model_parameterization": "gated",
7
+ "adam_lr": 0.05,
8
+ "muon_lr": 0.05,
9
+ "base_dir": "logs_svd_gated"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 1966080,
15
+ "train_seq_len": 12288,
16
+ "val_seq_len": 65536,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 200,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "f15d7967-d463-4726-99a0-e07de412ca4e",
24
+ "script_code_logged_at_start": true
25
+ }
logs_svd_gated/mode_13_param_gated_seed_46/training_log_f15d7967-d463-4726-99a0-e07de412ca4e.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_13_param_gated_seed_47/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 47,
5
+ "optimizer_mode": 13,
6
+ "model_parameterization": "gated",
7
+ "adam_lr": 0.05,
8
+ "muon_lr": 0.05,
9
+ "base_dir": "logs_svd_gated"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 1966080,
15
+ "train_seq_len": 12288,
16
+ "val_seq_len": 65536,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 200,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "3f513c1c-b909-494f-92a7-f9975950351b",
24
+ "script_code_logged_at_start": true
25
+ }
logs_svd_gated/mode_13_param_gated_seed_47/training_log_3f513c1c-b909-494f-92a7-f9975950351b.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_13_param_gated_seed_48/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 48,
5
+ "optimizer_mode": 13,
6
+ "model_parameterization": "gated",
7
+ "adam_lr": 0.05,
8
+ "muon_lr": 0.05,
9
+ "base_dir": "logs_svd_gated"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 1966080,
15
+ "train_seq_len": 12288,
16
+ "val_seq_len": 65536,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 200,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "1d32ef1a-6c9c-42b2-8a59-62ddd2143fab",
24
+ "script_code_logged_at_start": true
25
+ }
logs_svd_gated/mode_13_param_gated_seed_48/training_log_1d32ef1a-6c9c-42b2-8a59-62ddd2143fab.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_13_param_gated_seed_49/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 49,
5
+ "optimizer_mode": 13,
6
+ "model_parameterization": "gated",
7
+ "adam_lr": 0.05,
8
+ "muon_lr": 0.05,
9
+ "base_dir": "logs_svd_gated"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 1966080,
15
+ "train_seq_len": 12288,
16
+ "val_seq_len": 65536,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 200,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "1531a5c8-fb60-4f63-ad76-0f25f42b48db",
24
+ "script_code_logged_at_start": true
25
+ }
logs_svd_gated/mode_13_param_gated_seed_49/training_log_1531a5c8-fb60-4f63-ad76-0f25f42b48db.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_13_param_gated_seed_50/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 50,
5
+ "optimizer_mode": 13,
6
+ "model_parameterization": "gated",
7
+ "adam_lr": 0.05,
8
+ "muon_lr": 0.05,
9
+ "base_dir": "logs_svd_gated"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 1966080,
15
+ "train_seq_len": 12288,
16
+ "val_seq_len": 65536,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 200,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "f5f8623b-17fe-4271-a358-8cb57ae238a1",
24
+ "script_code_logged_at_start": true
25
+ }
logs_svd_gated/mode_13_param_gated_seed_50/training_log_f5f8623b-17fe-4271-a358-8cb57ae238a1.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_14_param_gated_seed_41/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 41,
+ "optimizer_mode": 14,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "3a060110-ad46-4bb9-9bfc-220548766993",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_14_param_gated_seed_41/training_log_3a060110-ad46-4bb9-9bfc-220548766993.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_14_param_gated_seed_42/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 42,
+ "optimizer_mode": 14,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "3c8ef23e-8e99-4dfb-af73-28cde593d61e",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_14_param_gated_seed_42/training_log_3c8ef23e-8e99-4dfb-af73-28cde593d61e.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_14_param_gated_seed_43/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 43,
+ "optimizer_mode": 14,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "1c163800-35b0-4389-b3a7-5f103382de01",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_14_param_gated_seed_43/training_log_1c163800-35b0-4389-b3a7-5f103382de01.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_14_param_gated_seed_44/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 44,
+ "optimizer_mode": 14,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "b3f636bf-aaae-4f63-84ba-14c79a0fac04",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_14_param_gated_seed_44/training_log_b3f636bf-aaae-4f63-84ba-14c79a0fac04.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_14_param_gated_seed_45/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 45,
+ "optimizer_mode": 14,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "45ff64f4-c3fe-4a27-b43d-8b30681d5861",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_14_param_gated_seed_45/training_log_45ff64f4-c3fe-4a27-b43d-8b30681d5861.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_14_param_gated_seed_46/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 46,
+ "optimizer_mode": 14,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "f81cb117-3729-42b6-a4bf-001b4dc1d990",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_14_param_gated_seed_46/training_log_f81cb117-3729-42b6-a4bf-001b4dc1d990.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_14_param_gated_seed_47/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 47,
+ "optimizer_mode": 14,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "7633ecfb-e90e-4d78-8c06-563f8c802dee",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_14_param_gated_seed_47/training_log_7633ecfb-e90e-4d78-8c06-563f8c802dee.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_14_param_gated_seed_48/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 48,
+ "optimizer_mode": 14,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "add87a33-be2f-4e3e-afdf-d3bf661e7185",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_14_param_gated_seed_48/training_log_add87a33-be2f-4e3e-afdf-d3bf661e7185.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_14_param_gated_seed_49/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 49,
+ "optimizer_mode": 14,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "a67ed87a-addb-45e9-9122-746d6f16e641",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_14_param_gated_seed_49/training_log_a67ed87a-addb-45e9-9122-746d6f16e641.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_14_param_gated_seed_50/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 50,
+ "optimizer_mode": 14,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "20edc821-54ac-4e8f-8176-8387c86d21f5",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_14_param_gated_seed_50/training_log_20edc821-54ac-4e8f-8176-8387c86d21f5.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_15_param_gated_seed_41/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 41,
+ "optimizer_mode": 15,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "f146521e-11b7-47c0-93e1-af861941cb9b",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_15_param_gated_seed_41/training_log_f146521e-11b7-47c0-93e1-af861941cb9b.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_15_param_gated_seed_42/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 42,
+ "optimizer_mode": 15,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "1501a628-3a92-4eec-9378-5faa95a74a96",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_15_param_gated_seed_42/training_log_1501a628-3a92-4eec-9378-5faa95a74a96.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_15_param_gated_seed_43/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 43,
+ "optimizer_mode": 15,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "e0218743-5660-4687-92f9-454060288cb7",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_15_param_gated_seed_43/training_log_e0218743-5660-4687-92f9-454060288cb7.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_15_param_gated_seed_44/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 44,
+ "optimizer_mode": 15,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "4acb41e0-2540-49e6-8cc2-39b68527f0d1",
+ "script_code_logged_at_start": true
+ }
logs_svd_gated/mode_15_param_gated_seed_44/training_log_4acb41e0-2540-49e6-8cc2-39b68527f0d1.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_svd_gated/mode_15_param_gated_seed_45/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+ "cli_args": {
+ "unet": false,
+ "seed": 45,
+ "optimizer_mode": 15,
+ "model_parameterization": "gated",
+ "adam_lr": 0.05,
+ "muon_lr": 0.05,
+ "base_dir": "logs_svd_gated"
+ },
+ "hyperparameters": {
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+ "val_tokens": 1966080,
+ "train_seq_len": 12288,
+ "val_seq_len": 65536,
+ "num_iterations": 10000,
+ "cooldown_frac": 0.4,
+ "vocab_size": 50257,
+ "val_loss_every": 200,
+ "save_checkpoint": false
+ },
+ "run_uuid_for_log": "43ee740f-19e3-454c-ad13-3a07cef2afcf",
+ "script_code_logged_at_start": true
+ }