zhangfz committed on
Commit b9d31c3 · 1 Parent(s): 2b2c4b7
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. logs_new_MUON_large_reshape_svd_gated/adam_lr_search/mode_5_param_gated_muon_lr_0.0005_adam_lr_0.0001_seed_42/config.json +27 -0
  2. logs_new_MUON_large_reshape_svd_gated/adam_lr_search/mode_5_param_gated_muon_lr_0.0005_adam_lr_0.0002_seed_42/config.json +27 -0
  3. logs_new_MUON_large_reshape_svd_gated/adam_lr_search/mode_5_param_gated_muon_lr_0.0005_adam_lr_0.0002_seed_42/training_log_e1801bc8-61ea-4070-99f6-ffb8e3ec4441.txt +0 -0
  4. logs_new_MUON_large_reshape_svd_gated/adam_lr_search/mode_5_param_gated_muon_lr_0.0005_adam_lr_0.0002_seed_43/config.json +27 -0
  5. logs_new_MUON_large_reshape_svd_gated/adam_lr_search/mode_5_param_gated_muon_lr_0.0005_adam_lr_0.0002_seed_43/training_log_87651557-14a7-4c0b-80e7-7f6b32172f1e.txt +0 -0
  6. logs_new_MUON_large_reshape_svd_gated/ori/mode_1_param_gated_muon_lr_0.0005_adam_lr_0.0005_seed_42/config.json +27 -0
  7. logs_new_MUON_large_reshape_svd_gated/ori/mode_1_param_gated_muon_lr_0.0005_adam_lr_0.0005_seed_42/training_log_0df68171-82e1-4de2-a0c5-af00f95db131.txt +0 -0
  8. logs_new_MUON_large_reshape_svd_gated/ori/mode_1_param_gated_muon_lr_0.0005_adam_lr_0.0005_seed_43/config.json +27 -0
  9. logs_new_MUON_large_reshape_svd_gated/ori/mode_1_param_gated_muon_lr_0.0005_adam_lr_0.0005_seed_43/training_log_14f8cd0f-e874-488a-ab8b-8e92f305dfaf.txt +1168 -0
  10. logs_new_MUON_large_reshape_svd_gated/svd/mode_5_param_gated_seed_44/training_log_71322229-20fb-4bd6-8ca5-98de9717fbce.txt +0 -0
  11. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_42/config.json +1 -1
  12. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_42/training_log_b8831a4c-12f0-4a61-ae8b-4f32def89ede.txt +0 -0
  13. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_43/config.json +25 -0
  14. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_43/training_log_13322988-b0ae-4e6e-81c8-a19f29460fb7.txt +0 -0
  15. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_45/config.json +25 -0
  16. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_45/training_log_f3c2cbd7-4390-4795-8459-adb334970d83.txt +0 -0
  17. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_46/config.json +25 -0
  18. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_46/training_log_8c5ef51b-70e7-40a7-8fcb-ab0c5dbcbca6.txt +0 -0
  19. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_42/config.json +25 -0
  20. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_42/training_log_535acb30-1e05-43d0-8a6e-f7fdd7e93523.txt +0 -0
  21. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_43/config.json +25 -0
  22. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_43/training_log_51a5fccc-1092-4e8f-b775-94e3f7f8f701.txt +0 -0
  23. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_45/config.json +25 -0
  24. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_45/training_log_6b55014d-0163-4948-acef-1bd7c6fac142.txt +0 -0
  25. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_46/config.json +25 -0
  26. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_46/training_log_14b69eae-8a7c-4dbc-8bac-b2aab72b48a7.txt +0 -0
  27. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_42/config.json +25 -0
  28. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_42/training_log_ff0f11cb-7fdd-4119-a532-b5b040bdd0b2.txt +0 -0
  29. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_43/config.json +25 -0
  30. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_43/training_log_74fa81b7-209f-4653-9e84-0c69fb6fc666.txt +0 -0
  31. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_45/config.json +25 -0
  32. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_45/training_log_15456da8-2f1d-4ae5-965d-2481be0b71f3.txt +0 -0
  33. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_46/config.json +25 -0
  34. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_46/training_log_a54cc0c5-fdb0-4fe6-a467-7df0739500c8.txt +0 -0
  35. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_42/config.json +1 -1
  36. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_42/training_log_fab5639f-ba5b-4d9f-a38c-69aa47967c84.txt +0 -0
  37. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_43/config.json +25 -0
  38. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_43/training_log_c98c52eb-2b46-4f88-badc-890fc51959c2.txt +0 -0
  39. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_45/config.json +25 -0
  40. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_45/training_log_ef7bf1f7-de55-4a4b-a0ab-27702430dc8d.txt +0 -0
  41. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_46/config.json +25 -0
  42. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_46/training_log_7b0e9962-3dc8-4d4b-9148-5ebf758f1505.txt +0 -0
  43. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_42/config.json +1 -1
  44. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_42/training_log_2b27f121-8999-4983-b952-012571b08358.txt +0 -0
  45. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_42/training_log_980127d3-ed72-4576-88df-d5d63142f40f.txt +0 -0
  46. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_43/config.json +25 -0
  47. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_43/training_log_6f3da0f7-185f-42a1-a36f-b3e49dc3211c.txt +0 -0
  48. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_45/config.json +25 -0
  49. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_45/training_log_43310052-95d8-45d5-a51d-d782e89944ba.txt +0 -0
  50. sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_45/training_log_a2a2964f-b4b1-4192-b944-13a0b2cbcc4f.txt +0 -0
logs_new_MUON_large_reshape_svd_gated/adam_lr_search/mode_5_param_gated_muon_lr_0.0005_adam_lr_0.0001_seed_42/config.json ADDED
@@ -0,0 +1,27 @@
{
    "cli_args": {
        "seed": 42,
        "optimizer_mode": 5,
        "model_parameterization": "gated",
        "adam_lr": 0.0001,
        "muon_lr": 0.0005,
        "base_dir": "logs_new_MUON_large_reshape_svd_gated/adam_lr_search"
    },
    "hyperparameters": {
        "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
        "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
        "batch_size": 960,
        "device_batch_size": 24,
        "sequence_length": 1024,
        "num_iterations": 6000,
        "learning_rate": 0.0018,
        "warmup_iters": 0,
        "warmdown_iters": 0,
        "weight_decay": 0,
        "val_loss_every": 125,
        "val_tokens": 10420224,
        "save_every": 0
    },
    "run_uuid_for_log": "261fa789-df7d-467c-8a61-132d8b890d81",
    "script_code_logged_at_start": true
}
logs_new_MUON_large_reshape_svd_gated/adam_lr_search/mode_5_param_gated_muon_lr_0.0005_adam_lr_0.0002_seed_42/config.json ADDED
@@ -0,0 +1,27 @@
{
    "cli_args": {
        "seed": 42,
        "optimizer_mode": 5,
        "model_parameterization": "gated",
        "adam_lr": 0.0002,
        "muon_lr": 0.0005,
        "base_dir": "logs_new_MUON_large_reshape_svd_gated/adam_lr_search"
    },
    "hyperparameters": {
        "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
        "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
        "batch_size": 960,
        "device_batch_size": 24,
        "sequence_length": 1024,
        "num_iterations": 6000,
        "learning_rate": 0.0018,
        "warmup_iters": 0,
        "warmdown_iters": 0,
        "weight_decay": 0,
        "val_loss_every": 125,
        "val_tokens": 10420224,
        "save_every": 0
    },
    "run_uuid_for_log": "e1801bc8-61ea-4070-99f6-ffb8e3ec4441",
    "script_code_logged_at_start": true
}
logs_new_MUON_large_reshape_svd_gated/adam_lr_search/mode_5_param_gated_muon_lr_0.0005_adam_lr_0.0002_seed_42/training_log_e1801bc8-61ea-4070-99f6-ffb8e3ec4441.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_new_MUON_large_reshape_svd_gated/adam_lr_search/mode_5_param_gated_muon_lr_0.0005_adam_lr_0.0002_seed_43/config.json ADDED
@@ -0,0 +1,27 @@
{
    "cli_args": {
        "seed": 43,
        "optimizer_mode": 5,
        "model_parameterization": "gated",
        "adam_lr": 0.0002,
        "muon_lr": 0.0005,
        "base_dir": "logs_new_MUON_large_reshape_svd_gated/adam_lr_search"
    },
    "hyperparameters": {
        "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
        "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
        "batch_size": 960,
        "device_batch_size": 24,
        "sequence_length": 1024,
        "num_iterations": 6000,
        "learning_rate": 0.0018,
        "warmup_iters": 0,
        "warmdown_iters": 0,
        "weight_decay": 0,
        "val_loss_every": 125,
        "val_tokens": 10420224,
        "save_every": 0
    },
    "run_uuid_for_log": "87651557-14a7-4c0b-80e7-7f6b32172f1e",
    "script_code_logged_at_start": true
}
logs_new_MUON_large_reshape_svd_gated/adam_lr_search/mode_5_param_gated_muon_lr_0.0005_adam_lr_0.0002_seed_43/training_log_87651557-14a7-4c0b-80e7-7f6b32172f1e.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_new_MUON_large_reshape_svd_gated/ori/mode_1_param_gated_muon_lr_0.0005_adam_lr_0.0005_seed_42/config.json ADDED
@@ -0,0 +1,27 @@
{
    "cli_args": {
        "seed": 42,
        "optimizer_mode": 1,
        "model_parameterization": "gated",
        "adam_lr": 0.0005,
        "muon_lr": 0.0005,
        "base_dir": "logs_new_MUON_large_reshape_svd_gated/ori"
    },
    "hyperparameters": {
        "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
        "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
        "batch_size": 960,
        "device_batch_size": 24,
        "sequence_length": 1024,
        "num_iterations": 6000,
        "learning_rate": 0.0018,
        "warmup_iters": 0,
        "warmdown_iters": 0,
        "weight_decay": 0,
        "val_loss_every": 125,
        "val_tokens": 10420224,
        "save_every": 0
    },
    "run_uuid_for_log": "0df68171-82e1-4de2-a0c5-af00f95db131",
    "script_code_logged_at_start": true
}
logs_new_MUON_large_reshape_svd_gated/ori/mode_1_param_gated_muon_lr_0.0005_adam_lr_0.0005_seed_42/training_log_0df68171-82e1-4de2-a0c5-af00f95db131.txt ADDED
The diff for this file is too large to render. See raw diff
 
logs_new_MUON_large_reshape_svd_gated/ori/mode_1_param_gated_muon_lr_0.0005_adam_lr_0.0005_seed_43/config.json ADDED
@@ -0,0 +1,27 @@
{
    "cli_args": {
        "seed": 43,
        "optimizer_mode": 1,
        "model_parameterization": "gated",
        "adam_lr": 0.0005,
        "muon_lr": 0.0005,
        "base_dir": "logs_new_MUON_large_reshape_svd_gated/ori"
    },
    "hyperparameters": {
        "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
        "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
        "batch_size": 960,
        "device_batch_size": 24,
        "sequence_length": 1024,
        "num_iterations": 6000,
        "learning_rate": 0.0018,
        "warmup_iters": 0,
        "warmdown_iters": 0,
        "weight_decay": 0,
        "val_loss_every": 125,
        "val_tokens": 10420224,
        "save_every": 0
    },
    "run_uuid_for_log": "14f8cd0f-e874-488a-ab8b-8e92f305dfaf",
    "script_code_logged_at_start": true
}
logs_new_MUON_large_reshape_svd_gated/ori/mode_1_param_gated_muon_lr_0.0005_adam_lr_0.0005_seed_43/training_log_14f8cd0f-e874-488a-ab8b-8e92f305dfaf.txt ADDED
@@ -0,0 +1,1168 @@
import os
import sys
with open(sys.argv[0]) as f:
    code = f.read() # read the code of this file ASAP, for logging
import uuid
import time
import copy
import glob
from dataclasses import dataclass, asdict
from functools import lru_cache
from pathlib import Path
import argparse # Keep argparse for --unet and potentially --optimizer_mode
import json
import random
import numpy as np

os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
import torch
torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
from torch import Tensor, nn
import torch.nn.functional as F
import torch.distributed as dist
# use of FlexAttention contributed by @KoszarskyB
from torch.nn.attention.flex_attention import BlockMask, flex_attention
sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
from optimizers.MUON_new_large_nes import Muon
from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
import torch._inductor.config as config
from torch.nn.parallel import DistributedDataParallel as DDP
from kn_util.utils import setup_debugpy


# -----------------------------------------------------------------------------
# Seeding Function
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks


# -----------------------------------------------------------------------------
# Our own simple Distributed Data Loader

def _peek_data_shard(filename):
    # only reads the header, returns header data
    with open(filename, "rb") as f:
        # first read the header, which is 256 int32 integers (4 bytes each)
        header = np.frombuffer(f.read(256*4), dtype=np.int32)
    if header[0] != 20240520:
        print("ERROR: magic number mismatch in the data .bin file!")
        print("---> HINT: Are you passing in a correct file with --input_bin?")
        print("---> HINT: Dataset encoding changed recently, re-run data prepro or refer again to README")
        print("---> HINT: For example re-run: `python dev/data/tinyshakespeare.py`, then re-try")
        exit(1)
    assert header[1] == 1, "unsupported version"
    ntok = header[2] # number of tokens (claimed)
    return ntok # for now just return the number of tokens

def _load_data_shard(filename):
    with open(filename, "rb") as f:
        # first read the header, which is 256 int32 integers (4 bytes each)
        header = np.frombuffer(f.read(256*4), dtype=np.int32)
        assert header[0] == 20240520, "magic number mismatch in the data .bin file"
        assert header[1] == 1, "unsupported version"
        ntok = header[2] # number of tokens (claimed)
        # the rest of it are tokens, stored as uint16
        tokens = np.frombuffer(f.read(), dtype=np.uint16)
    assert len(tokens) == ntok, "number of tokens read does not match header?"
    return tokens

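# The two readers above fix the shard layout: a 256-int32 header (magic
# 20240520, version 1, claimed token count) followed by the tokens as uint16.
# A minimal sketch of the matching writer, assuming that layout is the whole
# format (the function name is illustrative, not part of the original pipeline):
def _write_data_shard(filename, tokens):
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520   # magic number checked by _peek_data_shard
    header[1] = 1          # version
    header[2] = len(tokens) # token count verified by _load_data_shard
    with open(filename, "wb") as f:
        f.write(header.tobytes())
        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())
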
class DistributedDataLoader:
    def __init__(self, filename_pattern, B, T, process_rank, num_processes):
        self.process_rank = process_rank
        self.num_processes = num_processes
        self.B = B
        self.T = T

        # glob files that match the pattern
        self.files = sorted(glob.glob(filename_pattern))
        assert len(self.files) > 0, f"did not find any files that match the pattern {filename_pattern}"

        # load and validate all data shards, count number of tokens in total
        ntok_total = 0
        for fname in self.files:
            shard_ntok = _peek_data_shard(fname)
            assert shard_ntok >= num_processes * B * T + 1
            ntok_total += int(shard_ntok)
        self.ntok_total = ntok_total

        # kick things off
        self.reset()

    def reset(self):
        self.current_shard = 0
        self.current_position = self.process_rank * self.B * self.T
        self.tokens = _load_data_shard(self.files[self.current_shard])

    def advance(self): # advance to next data shard
        self.current_shard = (self.current_shard + 1) % len(self.files)
        self.current_position = self.process_rank * self.B * self.T
        self.tokens = _load_data_shard(self.files[self.current_shard])

    def next_batch(self):
        B = self.B
        T = self.T
        buf = self.tokens[self.current_position : self.current_position+B*T+1]
        buf = torch.tensor(buf.astype(np.int32), dtype=torch.long)
        x = (buf[:-1]).view(B, T) # inputs
        y = (buf[1:]).view(B, T) # targets
        # advance current position and load next shard if necessary
        self.current_position += B * T * self.num_processes
        if self.current_position + (B * T * self.num_processes + 1) > len(self.tokens):
            self.advance()
        return x.cuda(), y.cuda()

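# Worked example of the striping in next_batch(), with illustrative numbers
# (a sketch, not used by training): rank r starts at r*B*T, and every batch
# advances all ranks by B*T*num_processes tokens, so the ranks read disjoint,
# interleaved windows of each shard.
def _demo_rank_offsets(B=2, T=4, num_processes=2):
    starts = [r * B * T for r in range(num_processes)]  # [0, 8]
    stride = B * T * num_processes                      # 16 tokens per batch
    return starts, stride
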
# -----------------------------------------------------------------------------
# int main

@dataclass
class Hyperparameters:
    # data hyperparams
    input_bin : str = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
    input_val_bin : str = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
    # optimization hyperparams
    batch_size : int = 8*120 # batch size, in sequences, across all devices
    device_batch_size : int = 24 # batch size, in sequences, per device
    sequence_length : int = 1024 # sequence length, in tokens
    num_iterations : int = 6000 # number of iterations to run
    learning_rate : float = 0.0036 / 2
    warmup_iters : int = 0
    warmdown_iters : int = 0 # number of iterations of linear warmup/warmdown for triangular or trapezoidal schedule
    weight_decay : float = 0
    # evaluation and logging hyperparams
    val_loss_every : int = 125 # every how many steps to evaluate val loss? 0 for only at the end
    val_tokens : int = 10420224 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
    save_every : int = 0 # every how many steps to save the checkpoint? 0 for only at the end
args = Hyperparameters()


# -----------------------------------------------------------------------------
# int main
# setup_debugpy(force=True)
parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
parser.add_argument("--optimizer_mode", type=int, default=0,
                    help="Defines how Muon is applied. "
                         "0: Muon(All Hidden Attn+MLP - original); "
                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
                         "3: Muon(All Attn)/Adam(MLP); "
                         "4: Muon(MLP)/Adam(All Attn); "
                         "5: All Adam (No Muon, all applicable matrices to Adam); "
                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
                    )
parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo", "norope", "gated"])
parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices")
parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices")
parser.add_argument("--base_dir", type=str, default="logs_new_MUON_large/test", help="Base directory for logs")
exp_args = parser.parse_args()
set_seed(exp_args.seed)


# set up DDP (distributed data parallel). torchrun sets this env variable
assert torch.cuda.is_available()
dist.init_process_group(backend='nccl')
ddp_rank = int(os.environ['RANK'])
ddp_local_rank = int(os.environ['LOCAL_RANK'])
ddp_world_size = int(os.environ['WORLD_SIZE'])
device = f'cuda:{ddp_local_rank}'
torch.cuda.set_device(device)
print(f"using device: {device}")
master_process = (ddp_rank == 0) # this process will do logging, checkpointing etc.

logfile = None
run_dir_path_str = None
base_log_dir = Path(exp_args.base_dir)


if master_process:
    import subprocess
    set_seed(exp_args.seed)

    # Construct folder name based on config and seed
    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_muon_lr_{exp_args.muon_lr}_adam_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
    run_dir_path = base_log_dir / run_folder_name
    run_dir_path.mkdir(parents=True, exist_ok=True)
    run_dir_path_str = str(run_dir_path)

    run_uuid = uuid.uuid4()
    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
    print(f"Logging to: {logfile}")

    # Save configuration
    config_to_save = {
        "cli_args": vars(exp_args),
        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
        "run_uuid_for_log": str(run_uuid),
        "script_code_logged_at_start": True
    }
    config_file_path = run_dir_path / "config.json"
    with open(config_file_path, "w") as f:
        json.dump(config_to_save, f, indent=4)
    print(f"Saved configuration to: {config_file_path}")

# convenience variables
B, T = args.device_batch_size, args.sequence_length
# calculate the number of steps to take in the val loop.
print(f"args.val_tokens: {args.val_tokens}, args.batch_size: {args.batch_size}, B: {B}, T: {T}, ddp_world_size: {ddp_world_size}")
assert args.val_tokens % (B * T * ddp_world_size) == 0
val_steps = args.val_tokens // (B * T * ddp_world_size)
# calculate the steps of gradient accumulation required to attain the desired global batch size.
assert args.batch_size % (B * ddp_world_size) == 0
train_accumulation_steps = args.batch_size // (B * ddp_world_size)

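# Worked example of the arithmetic above for the runs logged here
# (batch_size=960, B=24, T=1024, val_tokens=10420224), assuming an 8-GPU
# world size (the "8*120" default suggests 8 devices; the world size itself
# is not recorded in config.json):
#   train_accumulation_steps = 960 // (24 * 8)          = 5 micro-steps
#   val_steps                = 10420224 // (24*1024*8)  = 53 validation batches
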
# load tokens
train_loader = DistributedDataLoader(args.input_bin, B, T, ddp_rank, ddp_world_size)
val_loader = DistributedDataLoader(args.input_val_bin, B, T, ddp_rank, ddp_world_size)
if master_process:
    print(f"Training DataLoader: total number of tokens: {train_loader.ntok_total} across {len(train_loader.files)} files")
    print(f"Validation DataLoader: total number of tokens: {val_loader.ntok_total} across {len(val_loader.files)} files")
x, y = train_loader.next_batch()

# there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. suggested to me by @Grad62304977.
# this originates from Karpathy's experiments.
num_vocab = 50304


if exp_args.model_parameterization == "qkvo":
    from models.nano_GPT_qkvo_large import GPT, GPTConfig
    # model = GPT(GPTConfig(vocab_size=num_vocab, n_layer=25, n_head=12, n_embd=1536))
    model = GPT(GPTConfig(vocab_size=num_vocab, n_layer=36, n_head=20, n_embd=1280))
elif exp_args.model_parameterization == "gated":
    from models.nano_GPT_gated_large import GPT, GPTConfig
    model = GPT(GPTConfig(vocab_size=num_vocab, n_layer=27, n_head=20, n_embd=1280))


if master_process:
    print(sum(p.numel() for p in model.parameters()))
model = model.cuda()
if hasattr(config, "coordinate_descent_tuning"):
    config.coordinate_descent_tuning = True # suggested by @Chillee
model = torch.compile(model)
# here we wrap model into DDP container
model = DDP(model, device_ids=[ddp_local_rank])
raw_model = model.module # always contains the "raw" unwrapped model
ctx = torch.amp.autocast(device_type='cuda', dtype=torch.bfloat16)

# for name, param in raw_model.named_parameters():
#     print(name, param.shape)

if exp_args.model_parameterization == "qkvo":
    print("PRINT: Collecting parameters for optimizers...")
    head_params = [raw_model.lm_head.weight]
    # embed_params = [raw_model.transformer.wte.weight]

    # Granular collection for attention and MLP parts
    attn_q_params = []
    attn_k_params = []
    attn_v_params = []
    attn_o_params = [] # W_O from c_proj
    mlp_fc_params = []
    mlp_proj_params = []

    for block_module in raw_model.transformer.h:
        if block_module.attn is not None:
            # These attributes (c_q, c_k, c_v) MUST exist in your CausalSelfAttention class
            if hasattr(block_module.attn, 'c_q'): attn_q_params.append(block_module.attn.c_q.weight)
            else:
                print(f"PRINT: Warning: c_q not found in attn module of a block.")
            if hasattr(block_module.attn, 'c_k'): attn_k_params.append(block_module.attn.c_k.weight)
            else: print(f"PRINT: Warning: c_k not found in attn module of a block.")
            if hasattr(block_module.attn, 'c_v'): attn_v_params.append(block_module.attn.c_v.weight)
            else: print(f"PRINT: Warning: c_v not found in attn module of a block.")
            attn_o_params.append(block_module.attn.c_proj.weight)
        if block_module.mlp is not None:
            mlp_fc_params.append(block_module.mlp.c_fc.weight)
            mlp_proj_params.append(block_module.mlp.c_proj.weight)

    # Combine into logical groups for experiments
    attn_qk_group = attn_q_params + attn_k_params
    attn_vo_group = attn_v_params + attn_o_params
    all_attn_matrices = attn_qk_group + attn_vo_group
    mlp_w1_group = mlp_fc_params
    mlp_w2_group = mlp_proj_params
    all_mlp_matrices = mlp_fc_params + mlp_proj_params

    # Scalar parameters (all others not explicitly grouped as matrices)
    # matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
    matrix_params_for_scalar_check = set(head_params + all_attn_matrices + all_mlp_matrices)
    scalar_params = [p for n, p in raw_model.named_parameters() if p not in matrix_params_for_scalar_check]
    for p_scalar in scalar_params: # Sanity check
        if p_scalar.ndim >= 2:
            print(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.")

    # Determine parameter distribution based on optimizer_mode
    muon_params_target_list = []
    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)

    current_optimizer_mode = exp_args.optimizer_mode

    print(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}")

    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
        print(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.")
        muon_params_target_list = all_attn_matrices + all_mlp_matrices
        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
        print(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_qk_group
        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
        print(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group
        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
        print(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = all_attn_matrices
        adam_matrix_target_list = all_mlp_matrices
    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
        print(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = all_mlp_matrices
        adam_matrix_target_list = all_attn_matrices
    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
        print(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = []
        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
        print(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = mlp_w2_group
        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
        print(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group + all_mlp_matrices
        adam_matrix_target_list = attn_qk_group
    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
        print(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group + mlp_w2_group
        adam_matrix_target_list = attn_qk_group + mlp_w1_group
    elif current_optimizer_mode == 9: # Muon on V Attn, MLP
        print(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_v_params + all_mlp_matrices
        adam_matrix_target_list = attn_o_params + attn_qk_group
    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
        print(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_o_params + all_mlp_matrices
        adam_matrix_target_list = attn_v_params + attn_qk_group
    elif current_optimizer_mode == 11: # Muon on W_1, Adam on all Attn, W_2
        print(f"PRINT: Mode 11: Muon on W_1 MLP. Adam on ALL Attn, W_2 MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = mlp_w1_group
        adam_matrix_target_list = all_attn_matrices + mlp_w2_group
    elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others
        print(f"PRINT: Mode 12: Muon on VO Attn, W_1 MLP. Adam on QK Attn, W_2 MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group + mlp_w1_group
        adam_matrix_target_list = attn_qk_group + mlp_w2_group
    elif current_optimizer_mode == 13:
        print(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_o_params + mlp_w2_group
        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
    elif current_optimizer_mode == 14:
        print(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_o_params
        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
    elif current_optimizer_mode == 15:
        print(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_v_params
        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
    else:
        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")

    # Adam optimizer setup
    adam_param_groups_config = [
        dict(params=head_params, lr=adam_matrix_lr),
        # dict(params=embed_params, lr=adam_matrix_lr),
        dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam
    ]
    # Add matrices specifically assigned to Adam for this experiment mode
    if adam_matrix_target_list:
        # Ensure adam_matrix_target_list is flat and contains Parameters
        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
        if flat_adam_matrices: # Only add group if there are params
            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))

    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
    print(f"PRINT: The length of Adam param groups config: {len(adam_param_groups_config)}")
    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.9, 0.95), eps=1e-10, fused=True)
    optimizers = [optimizer1] # Start with Adam

    # Muon optimizer setup
    # if muon_params_target_list:
    #     # Ensure muon_params_target_list is flat, unique, and contains Parameters
    #     flat_unique_muon_params = []
    #     seen_muon_ids = set()
    #     for sublist_or_p in muon_params_target_list:
    #         for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
    #             if p is not None and id(p) not in seen_muon_ids:
    #                 flat_unique_muon_params.append(p)
    #                 seen_muon_ids.add(id(p))
    #
    #     muon_param_groups_config = []
    #     if flat_unique_muon_params:
    #         muon_param_groups_config.append(dict(params=flat_unique_muon_params, lr=exp_args.muon_lr))
    #
    #     if flat_unique_muon_params: # Only create Muon if it has parameters
    #         optimizer2 = Muon(muon_param_groups_config, lr=exp_args.muon_lr, momentum=0.95, rank=ddp_rank, world_size=ddp_world_size) # Pass nesterov, ns_steps
    #         optimizers.append(optimizer2)
    #     else:
    #         print("PRINT: Muon optimizer not created as its target parameter list was empty.")
    #         optimizer2 = None # Explicitly set to None if not created
    # else:
    #     print("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).")
    #     optimizer2 = None # Explicitly set to None
    # Muon optimizer setup
    if muon_params_target_list:
        # Ensure muon_params_target_list is flat, unique, and contains Parameters
        flat_unique_muon_params = []
        seen_muon_ids = set()
        for sublist_or_p in muon_params_target_list:
            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
                if p is not None and id(p) not in seen_muon_ids:
                    flat_unique_muon_params.append(p)
                    seen_muon_ids.add(id(p))

        if flat_unique_muon_params: # Only create Muon if it has parameters
            optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, rank=ddp_rank, world_size=ddp_world_size) # Pass nesterov, ns_steps
            optimizers.append(optimizer2)
        else:
            print("PRINT: Muon optimizer not created as its target parameter list was empty.")
            optimizer2 = None # Explicitly set to None if not created
    else:
        print("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).")
        optimizer2 = None # Explicitly set to None

    print(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}")
    if optimizer2:
        print(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.")
elif exp_args.model_parameterization == "gated":
    print("PRINT: Collecting parameters for optimizers...")
    head_params = [raw_model.lm_head.weight]
    # embed_params = [raw_model.transformer.wte.weight]

    # Granular collection for attention and MLP parts
    attn_q_params = []
    attn_k_params = []
    attn_v_params = []
    attn_o_params = [] # W_O from c_proj
    mlp_fc_params = []
    mlp_proj_params = []
    mlp_up_params = []

    for block_module in raw_model.transformer.h:
        if block_module.attn is not None:
            # These attributes (c_q, c_k, c_v) MUST exist in your CausalSelfAttention class
            if hasattr(block_module.attn, 'c_q'): attn_q_params.append(block_module.attn.c_q.weight)
            else:
                print(f"PRINT: Warning: c_q not found in attn module of a block.")
            if hasattr(block_module.attn, 'c_k'): attn_k_params.append(block_module.attn.c_k.weight)
            else: print(f"PRINT: Warning: c_k not found in attn module of a block.")
            if hasattr(block_module.attn, 'c_v'): attn_v_params.append(block_module.attn.c_v.weight)
            else: print(f"PRINT: Warning: c_v not found in attn module of a block.")
            attn_o_params.append(block_module.attn.c_proj.weight)
        if block_module.mlp is not None:
            mlp_fc_params.append(block_module.mlp.c_fc.weight)
            mlp_proj_params.append(block_module.mlp.c_proj.weight)
            mlp_up_params.append(block_module.mlp.c_up.weight)

    # Combine into logical groups for experiments
    attn_qk_group = attn_q_params + attn_k_params
    attn_vo_group = attn_v_params + attn_o_params
    all_attn_matrices = attn_qk_group + attn_vo_group
    mlp_w1_group = mlp_fc_params + mlp_up_params
    mlp_w2_group = mlp_proj_params
    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params

    # Scalar parameters (all others not explicitly grouped as matrices)
    # matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
    matrix_params_for_scalar_check = set(head_params + all_attn_matrices + all_mlp_matrices)
    scalar_params = [p for n, p in raw_model.named_parameters() if p not in matrix_params_for_scalar_check]
    for p_scalar in scalar_params: # Sanity check
        if p_scalar.ndim >= 2:
            print(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.")

    # Determine parameter distribution based on optimizer_mode
    muon_params_target_list = []
    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)

    current_optimizer_mode = exp_args.optimizer_mode

    print(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}")

    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
        print(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.")
        muon_params_target_list = all_attn_matrices + all_mlp_matrices
        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
        print(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_qk_group
        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
        print(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group
        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
        print(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = all_attn_matrices
        adam_matrix_target_list = all_mlp_matrices
    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
        print(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = all_mlp_matrices
        adam_matrix_target_list = all_attn_matrices
    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
        print(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = []
        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
        print(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = mlp_w2_group
        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
        print(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group + all_mlp_matrices
        adam_matrix_target_list = attn_qk_group
    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
        print(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group + mlp_w2_group
        adam_matrix_target_list = attn_qk_group + mlp_w1_group
    elif current_optimizer_mode == 9: # Muon on V Attn, MLP
        print(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_v_params + all_mlp_matrices
        adam_matrix_target_list = attn_o_params + attn_qk_group
    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
        print(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_o_params + all_mlp_matrices
        adam_matrix_target_list = attn_v_params + attn_qk_group
    elif current_optimizer_mode == 11: # Muon on W_1, Adam on all Attn, W_2
        print(f"PRINT: Mode 11: Muon on W_1 MLP. Adam on ALL Attn, W_2 MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = mlp_w1_group
        adam_matrix_target_list = all_attn_matrices + mlp_w2_group
    elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others
        print(f"PRINT: Mode 12: Muon on VO Attn, W_1 MLP. Adam on QK Attn, W_2 MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group + mlp_w1_group
        adam_matrix_target_list = attn_qk_group + mlp_w2_group
    elif current_optimizer_mode == 13:
        print(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_o_params + mlp_w2_group
        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
    elif current_optimizer_mode == 14:
        print(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_o_params
        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
    elif current_optimizer_mode == 15:
        print(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_v_params
        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
    else:
        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")

    # Adam optimizer setup
    adam_param_groups_config = [
        dict(params=head_params, lr=adam_matrix_lr),
        # dict(params=embed_params, lr=adam_matrix_lr),
        dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam
    ]

    # Add matrices specifically assigned to Adam for this experiment mode
    if adam_matrix_target_list:
        # Ensure adam_matrix_target_list is flat and contains Parameters
        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
        if flat_adam_matrices: # Only add group if there are params
            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))

    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
    # print(f"PRINT: The length of Adam param groups config: {len(adam_param_groups_config)}")
    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.9, 0.95), eps=1e-10, fused=True)
    optimizers = [optimizer1] # Start with Adam


    if muon_params_target_list:
        # Ensure muon_params_target_list is flat, unique, and contains Parameters
        flat_unique_muon_params = []
        seen_muon_ids = set()
        for sublist_or_p in muon_params_target_list:
            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
                if p is not None and id(p) not in seen_muon_ids:
                    flat_unique_muon_params.append(p)
                    seen_muon_ids.add(id(p))

        if flat_unique_muon_params: # Only create Muon if it has parameters
            optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, rank=ddp_rank, world_size=ddp_world_size) # Pass nesterov, ns_steps
            optimizers.append(optimizer2)
        else:
            print("PRINT: Muon optimizer not created as its target parameter list was empty.")
            optimizer2 = None # Explicitly set to None if not created
    else:
        print("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).")
        optimizer2 = None # Explicitly set to None

    print(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}")
    if optimizer2:
        print(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.")

# optimizer1 = torch.optim.AdamW(raw_model.lm_head.parameters(), lr=args.learning_rate, betas=(0.9, 0.95),
#                                weight_decay=args.weight_decay, fused=True)
# optimizer2 = Muon(raw_model.transformer.h.parameters(), lr=0.1*args.learning_rate, momentum=0.95,
#                   rank=ddp_rank, world_size=ddp_world_size)

# optimizers = [optimizer1, optimizer2]
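# Muon itself is imported from optimizers.MUON_new_large_nes above. As a
# sketch of the general technique (an assumption about that module, not code
# taken from it): Muon-style optimizers replace each 2D momentum update with
# an approximately orthogonalized version via a few Newton-Schulz iterations.
# The quintic coefficients below follow the public modded-nanogpt reference.
def _newton_schulz_orthogonalize(G, steps=5, eps=1e-7):
    a, b, c = (3.4445, -4.7750, 2.0315)
    X = G.bfloat16() / (G.norm() + eps)  # normalize so the spectrum is bounded
    transposed = G.size(0) > G.size(1)
    if transposed:
        X = X.T
    for _ in range(steps):
        A = X @ X.T
        X = a * X + (b * A + c * A @ A) @ X  # quintic iteration toward U V^T
    if transposed:
        X = X.T
    return X.to(G.dtype)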
# learning rate decay scheduler (linear warmup and warmdown)
def get_lr(it):
    assert it <= args.num_iterations
    # 1) linear warmup for warmup_iters steps
    if it < args.warmup_iters:
        return (it+1) / args.warmup_iters
    # 2) constant lr for a while
    elif it < args.num_iterations - args.warmdown_iters:
        return 1.0
    # 3) linear warmdown
    else:
        decay_ratio = (args.num_iterations - it) / args.warmdown_iters
        return decay_ratio
schedulers = [torch.optim.lr_scheduler.LambdaLR(opt, get_lr) for opt in optimizers]

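# Worked example of the multiplier get_lr returns, with illustrative
# warmup_iters=100 and warmdown_iters=1000 (the runs logged here use 0/0,
# i.e. a constant multiplier of 1.0 throughout):
#   get_lr(49)   = (49+1)/100       = 0.5  (warmup)
#   get_lr(3000) = 1.0                     (constant phase)
#   get_lr(5500) = (6000-5500)/1000 = 0.5  (warmdown)
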
if master_process:
    with open(logfile, "a") as f:
        f.write(code)

training_time_ms = 0
# start the clock
torch.cuda.synchronize()
t0 = time.time()
# begin training
train_loader.reset()
for step in range(args.num_iterations + 1):
    last_step = (step == args.num_iterations)
    # This effectively ignores timing first 10 steps, which are slower for weird reasons.
    # Alternately, and slightly more correctly in terms of benchmarking, we could do 10
    # steps with dummy data first, and then re-initialize the model and reset the loader.
    if step == 10:
        training_time_ms = 0
        t0 = time.time()
    timed_steps = float('nan') if step <= 11 else (step - 10) + 1 # <= 11 to avoid bug in val

    # once in a while evaluate the validation dataset
    if (last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0)):
        # stop the clock
        torch.cuda.synchronize()
        training_time_ms += 1000 * (time.time() - t0)
        # run validation batches
        with torch.no_grad():
            val_loader.reset()
            val_loss = 0.0
            for _ in range(val_steps):
                x_val, y_val = val_loader.next_batch()
                with ctx: # of course, we'd like to use no_grad() here too, but that creates a torch.compile error for some reason
                    _, loss = model(x_val, y_val, return_logits=False)
                    val_loss += loss.detach()
                    del loss
            dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
            val_loss /= val_steps
        # log val loss to console and to logfile
        if master_process:
            print(f'step:{step}/{args.num_iterations} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/(timed_steps-1):.2f}ms')
            with open(logfile, "a") as f:
                f.write(f'step:{step}/{args.num_iterations} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/(timed_steps-1):.2f}ms\n')
        # start the clock again
        torch.cuda.synchronize()
        t0 = time.time()

    if master_process and (last_step or (args.save_every > 0 and step % args.save_every == 0)):
        # stop the clock
        torch.cuda.synchronize()
        training_time_ms += 1000 * (time.time() - t0)
        # save the state of the training process under the run directory
        log = dict(step=step, code=code, model=raw_model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers])
        torch.save(log, '%s/state_step%06d.pt' % (run_dir_path_str, step))
        # start the clock again
        torch.cuda.synchronize()
        t0 = time.time()

    # bit confusing: we want to make sure to eval on 0th iteration
    # but also after the very last iteration. so we loop for step <= num_iterations
    # instead of just < num_iterations (one extra due to <=), only to do
    # the validation/sampling one last time, and then we break right here as we're done.
    if last_step:
        break

    # --------------- TRAINING SECTION BEGIN -----------------
    model.train()
    for i in range(1, train_accumulation_steps+1):
        # forward pass
        with ctx:
            _, loss = model(x, y, return_logits=False)
            train_loss = loss.detach()
        # advance the dataset for the next batch
        x, y = train_loader.next_batch()
        # backward pass
        if i < train_accumulation_steps:
            with model.no_sync(): # there's no need to sync gradients every accumulation step
                loss.backward()
        else:
            loss.backward() # just sync on the last step
    for p in model.parameters():
        p.grad /= train_accumulation_steps
    # step the optimizers and schedulers
    for opt, sched in zip(optimizers, schedulers):
        opt.step()
        sched.step()
    # null the gradients
    model.zero_grad(set_to_none=True)
    # --------------- TRAINING SECTION END -------------------
    # everything that follows now is just diagnostics, prints, logging, etc.

    # dist.all_reduce(train_loss, op=dist.ReduceOp.AVG) # all-reducing the training loss would be more correct in terms of logging, but slower
    if master_process:
        approx_time = training_time_ms + 1000 * (time.time() - t0)
        print(f"step:{step+1}/{args.num_iterations} train_loss:{train_loss.item():.4f} train_time:{approx_time:.0f}ms step_avg:{approx_time/timed_steps:.2f}ms")
        with open(logfile, "a") as f:
            f.write(f"step:{step+1}/{args.num_iterations} train_loss:{train_loss.item():.4f} train_time:{approx_time:.0f}ms step_avg:{approx_time/timed_steps:.2f}ms\n")

if master_process:
    print(f"peak memory consumption: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB")

step:0/6000 val_loss:10.9980 train_time:147ms step_avg:nanms
step:1/6000 train_loss:11.0032 train_time:40163ms step_avg:nanms
step:2/6000 train_loss:10.0867 train_time:43651ms step_avg:nanms
step:3/6000 train_loss:9.3940 train_time:47079ms step_avg:nanms
step:4/6000 train_loss:8.8213 train_time:50448ms step_avg:nanms
step:5/6000 train_loss:8.5063 train_time:53816ms step_avg:nanms
step:6/6000 train_loss:8.3227 train_time:57183ms step_avg:nanms
step:7/6000 train_loss:7.8312 train_time:60551ms step_avg:nanms
step:8/6000 train_loss:7.9443 train_time:63915ms step_avg:nanms
step:9/6000 train_loss:7.6822 train_time:67277ms step_avg:nanms
step:10/6000 train_loss:7.9030 train_time:70641ms step_avg:nanms
step:11/6000 train_loss:7.9286 train_time:3266ms step_avg:nanms
step:12/6000 train_loss:7.7966 train_time:6632ms step_avg:nanms
step:13/6000 train_loss:8.2405 train_time:10001ms step_avg:3333.77ms
step:14/6000 train_loss:7.7617 train_time:13371ms step_avg:3342.81ms
step:15/6000 train_loss:7.7143 train_time:16742ms step_avg:3348.42ms
step:16/6000 train_loss:7.7441 train_time:20115ms step_avg:3352.52ms
step:17/6000 train_loss:7.4602 train_time:23486ms step_avg:3355.14ms
step:18/6000 train_loss:7.7831 train_time:26862ms step_avg:3357.80ms
step:19/6000 train_loss:7.7015 train_time:30235ms step_avg:3359.50ms
step:20/6000 train_loss:7.4613 train_time:33610ms step_avg:3361.04ms
step:21/6000 train_loss:7.7094 train_time:36985ms step_avg:3362.29ms
step:22/6000 train_loss:7.8955 train_time:40362ms step_avg:3363.51ms
step:23/6000 train_loss:7.9146 train_time:43739ms step_avg:3364.55ms
step:24/6000 train_loss:7.6228 train_time:47119ms step_avg:3365.61ms
step:25/6000 train_loss:7.7719 train_time:50501ms step_avg:3366.72ms
step:26/6000 train_loss:7.3381 train_time:53887ms step_avg:3367.93ms
step:27/6000 train_loss:7.3544 train_time:57276ms step_avg:3369.18ms
step:28/6000 train_loss:7.2266 train_time:60667ms step_avg:3370.38ms
step:29/6000 train_loss:7.7346 train_time:64062ms step_avg:3371.70ms
step:30/6000 train_loss:7.3986 train_time:67459ms step_avg:3372.94ms
step:31/6000 train_loss:7.5775 train_time:70856ms step_avg:3374.09ms
step:32/6000 train_loss:7.4900 train_time:74253ms step_avg:3375.15ms
step:33/6000 train_loss:7.3644 train_time:77655ms step_avg:3376.29ms
step:34/6000 train_loss:9.5895 train_time:81055ms step_avg:3377.31ms
step:35/6000 train_loss:7.4402 train_time:84460ms step_avg:3378.42ms
step:36/6000 train_loss:7.4615 train_time:87865ms step_avg:3379.42ms
step:37/6000 train_loss:7.8023 train_time:91270ms step_avg:3380.37ms
step:38/6000 train_loss:7.1083 train_time:94683ms step_avg:3381.52ms
step:39/6000 train_loss:7.3571 train_time:98092ms step_avg:3382.49ms
step:40/6000 train_loss:7.2065 train_time:101505ms step_avg:3383.50ms
step:41/6000 train_loss:8.4150 train_time:104913ms step_avg:3384.30ms
step:42/6000 train_loss:7.1353 train_time:108324ms step_avg:3385.13ms
step:43/6000 train_loss:7.0507 train_time:111738ms step_avg:3386.00ms
step:44/6000 train_loss:7.2028 train_time:115147ms step_avg:3386.68ms
step:45/6000 train_loss:8.0166 train_time:118560ms step_avg:3387.44ms
step:46/6000 train_loss:7.0652 train_time:121970ms step_avg:3388.05ms
step:47/6000 train_loss:6.8885 train_time:125379ms step_avg:3388.61ms
step:48/6000 train_loss:7.0325 train_time:128786ms step_avg:3389.10ms
step:49/6000 train_loss:6.8367 train_time:132195ms step_avg:3389.61ms
step:50/6000 train_loss:7.0070 train_time:135606ms step_avg:3390.16ms
step:51/6000 train_loss:7.0726 train_time:139017ms step_avg:3390.66ms
step:52/6000 train_loss:6.9066 train_time:142429ms step_avg:3391.17ms
step:53/6000 train_loss:6.6981 train_time:145842ms step_avg:3391.68ms
step:54/6000 train_loss:6.9595 train_time:149256ms step_avg:3392.17ms
step:55/6000 train_loss:6.9502 train_time:152673ms step_avg:3392.73ms
step:56/6000 train_loss:7.0270 train_time:156085ms step_avg:3393.15ms
step:57/6000 train_loss:6.5760 train_time:159500ms step_avg:3393.61ms
step:58/6000 train_loss:6.9247 train_time:162916ms step_avg:3394.08ms
step:59/6000 train_loss:6.6995 train_time:166331ms step_avg:3394.51ms
step:60/6000 train_loss:6.7923 train_time:169748ms step_avg:3394.97ms
step:61/6000 train_loss:6.7997 train_time:173163ms step_avg:3395.36ms
step:62/6000 train_loss:7.0957 train_time:176582ms step_avg:3395.81ms
step:63/6000 train_loss:6.8985 train_time:180000ms step_avg:3396.23ms
step:64/6000 train_loss:6.7014 train_time:183439ms step_avg:3397.02ms
step:65/6000 train_loss:6.9532 train_time:186860ms step_avg:3397.46ms
step:66/6000 train_loss:7.1222 train_time:190281ms step_avg:3397.88ms
step:67/6000 train_loss:6.7153 train_time:193700ms step_avg:3398.25ms
step:68/6000 train_loss:6.9772 train_time:197122ms step_avg:3398.65ms
step:69/6000 train_loss:6.7818 train_time:200542ms step_avg:3399.02ms
step:70/6000 train_loss:6.8963 train_time:203964ms step_avg:3399.40ms
step:71/6000 train_loss:6.6256 train_time:207385ms step_avg:3399.76ms
step:72/6000 train_loss:6.4692 train_time:210807ms step_avg:3400.11ms
step:73/6000 train_loss:7.0340 train_time:214228ms step_avg:3400.44ms
step:74/6000 train_loss:6.6450 train_time:217650ms step_avg:3400.78ms
782
+ step:46/6000 train_loss:7.0652 train_time:121970ms step_avg:3388.05ms
783
+ step:47/6000 train_loss:6.8885 train_time:125379ms step_avg:3388.61ms
784
+ step:48/6000 train_loss:7.0325 train_time:128786ms step_avg:3389.10ms
785
+ step:49/6000 train_loss:6.8367 train_time:132195ms step_avg:3389.61ms
786
+ step:50/6000 train_loss:7.0070 train_time:135606ms step_avg:3390.16ms
787
+ step:51/6000 train_loss:7.0726 train_time:139017ms step_avg:3390.66ms
788
+ step:52/6000 train_loss:6.9066 train_time:142429ms step_avg:3391.17ms
789
+ step:53/6000 train_loss:6.6981 train_time:145842ms step_avg:3391.68ms
790
+ step:54/6000 train_loss:6.9595 train_time:149256ms step_avg:3392.17ms
791
+ step:55/6000 train_loss:6.9502 train_time:152673ms step_avg:3392.73ms
792
+ step:56/6000 train_loss:7.0270 train_time:156085ms step_avg:3393.15ms
793
+ step:57/6000 train_loss:6.5760 train_time:159500ms step_avg:3393.61ms
794
+ step:58/6000 train_loss:6.9247 train_time:162916ms step_avg:3394.08ms
795
+ step:59/6000 train_loss:6.6995 train_time:166331ms step_avg:3394.51ms
796
+ step:60/6000 train_loss:6.7923 train_time:169748ms step_avg:3394.97ms
797
+ step:61/6000 train_loss:6.7997 train_time:173163ms step_avg:3395.36ms
798
+ step:62/6000 train_loss:7.0957 train_time:176582ms step_avg:3395.81ms
799
+ step:63/6000 train_loss:6.8985 train_time:180000ms step_avg:3396.23ms
800
+ step:64/6000 train_loss:6.7014 train_time:183439ms step_avg:3397.02ms
801
+ step:65/6000 train_loss:6.9532 train_time:186860ms step_avg:3397.46ms
802
+ step:66/6000 train_loss:7.1222 train_time:190281ms step_avg:3397.88ms
803
+ step:67/6000 train_loss:6.7153 train_time:193700ms step_avg:3398.25ms
804
+ step:68/6000 train_loss:6.9772 train_time:197122ms step_avg:3398.65ms
805
+ step:69/6000 train_loss:6.7818 train_time:200542ms step_avg:3399.02ms
806
+ step:70/6000 train_loss:6.8963 train_time:203964ms step_avg:3399.40ms
807
+ step:71/6000 train_loss:6.6256 train_time:207385ms step_avg:3399.76ms
808
+ step:72/6000 train_loss:6.4692 train_time:210807ms step_avg:3400.11ms
809
+ step:73/6000 train_loss:7.0340 train_time:214228ms step_avg:3400.44ms
810
+ step:74/6000 train_loss:6.6450 train_time:217650ms step_avg:3400.78ms
811
+ step:75/6000 train_loss:6.5455 train_time:221072ms step_avg:3401.11ms
812
+ step:76/6000 train_loss:6.9795 train_time:224495ms step_avg:3401.44ms
813
+ step:77/6000 train_loss:6.7381 train_time:227918ms step_avg:3401.76ms
814
+ step:78/6000 train_loss:6.6731 train_time:231340ms step_avg:3402.07ms
815
+ step:79/6000 train_loss:6.6018 train_time:234762ms step_avg:3402.35ms
816
+ step:80/6000 train_loss:7.0440 train_time:238185ms step_avg:3402.64ms
817
+ step:81/6000 train_loss:6.2447 train_time:241610ms step_avg:3402.96ms
818
+ step:82/6000 train_loss:6.7507 train_time:245034ms step_avg:3403.25ms
819
+ step:83/6000 train_loss:6.5987 train_time:248458ms step_avg:3403.54ms
820
+ step:84/6000 train_loss:6.6679 train_time:251883ms step_avg:3403.83ms
821
+ step:85/6000 train_loss:6.5572 train_time:255310ms step_avg:3404.13ms
822
+ step:86/6000 train_loss:6.5454 train_time:258738ms step_avg:3404.45ms
823
+ step:87/6000 train_loss:6.7837 train_time:262162ms step_avg:3404.70ms
824
+ step:88/6000 train_loss:6.6183 train_time:265587ms step_avg:3404.97ms
825
+ step:89/6000 train_loss:6.5827 train_time:269015ms step_avg:3405.25ms
826
+ step:90/6000 train_loss:6.2560 train_time:272441ms step_avg:3405.51ms
827
+ step:91/6000 train_loss:6.5403 train_time:275867ms step_avg:3405.77ms
828
+ step:92/6000 train_loss:6.7094 train_time:279295ms step_avg:3406.03ms
829
+ step:93/6000 train_loss:6.3179 train_time:282723ms step_avg:3406.30ms
830
+ step:94/6000 train_loss:6.4432 train_time:286150ms step_avg:3406.55ms
831
+ step:95/6000 train_loss:6.5397 train_time:289577ms step_avg:3406.78ms
832
+ step:96/6000 train_loss:6.5843 train_time:293007ms step_avg:3407.05ms
833
+ step:97/6000 train_loss:6.4347 train_time:296434ms step_avg:3407.28ms
834
+ step:98/6000 train_loss:6.4028 train_time:299861ms step_avg:3407.51ms
835
+ step:99/6000 train_loss:6.4276 train_time:303287ms step_avg:3407.72ms
836
+ step:100/6000 train_loss:6.5565 train_time:306715ms step_avg:3407.95ms
837
+ step:101/6000 train_loss:6.5317 train_time:310142ms step_avg:3408.16ms
838
+ step:102/6000 train_loss:6.3285 train_time:316608ms step_avg:3441.39ms
839
+ step:103/6000 train_loss:6.2299 train_time:320028ms step_avg:3441.16ms
840
+ step:104/6000 train_loss:6.6324 train_time:323450ms step_avg:3440.96ms
841
+ step:105/6000 train_loss:6.5289 train_time:326874ms step_avg:3440.78ms
842
+ step:106/6000 train_loss:6.3982 train_time:330303ms step_avg:3440.66ms
843
+ step:107/6000 train_loss:5.9289 train_time:333733ms step_avg:3440.54ms
844
+ step:108/6000 train_loss:6.8416 train_time:337165ms step_avg:3440.46ms
845
+ step:109/6000 train_loss:6.3654 train_time:340597ms step_avg:3440.37ms
846
+ step:110/6000 train_loss:6.3199 train_time:344028ms step_avg:3440.28ms
847
+ step:111/6000 train_loss:6.4951 train_time:347463ms step_avg:3440.23ms
848
+ step:112/6000 train_loss:6.4661 train_time:350897ms step_avg:3440.17ms
849
+ step:113/6000 train_loss:6.3542 train_time:354332ms step_avg:3440.12ms
850
+ step:114/6000 train_loss:6.1977 train_time:357768ms step_avg:3440.08ms
851
+ step:115/6000 train_loss:6.4477 train_time:361206ms step_avg:3440.06ms
852
+ step:116/6000 train_loss:6.1892 train_time:364641ms step_avg:3440.01ms
853
+ step:117/6000 train_loss:6.3011 train_time:368074ms step_avg:3439.95ms
854
+ step:118/6000 train_loss:6.2822 train_time:371509ms step_avg:3439.90ms
855
+ step:119/6000 train_loss:6.4953 train_time:374944ms step_avg:3439.85ms
856
+ step:120/6000 train_loss:6.3468 train_time:378379ms step_avg:3439.81ms
857
+ step:121/6000 train_loss:6.0975 train_time:381812ms step_avg:3439.75ms
858
+ step:122/6000 train_loss:6.2077 train_time:385244ms step_avg:3439.68ms
859
+ step:123/6000 train_loss:6.2784 train_time:388680ms step_avg:3439.64ms
860
+ step:124/6000 train_loss:6.2644 train_time:392113ms step_avg:3439.59ms
861
+ step:125/6000 train_loss:5.9758 train_time:395546ms step_avg:3439.53ms
862
+ step:125/6000 val_loss:6.2202 train_time:395644ms step_avg:3440.38ms
863
+ step:126/6000 train_loss:6.2274 train_time:398970ms step_avg:3439.39ms
864
+ step:127/6000 train_loss:6.2286 train_time:402398ms step_avg:3439.30ms
865
+ step:128/6000 train_loss:5.9971 train_time:405829ms step_avg:3439.23ms
866
+ step:129/6000 train_loss:7.3516 train_time:409261ms step_avg:3439.17ms
867
+ step:130/6000 train_loss:6.2489 train_time:412687ms step_avg:3439.06ms
868
+ step:131/6000 train_loss:6.1241 train_time:416120ms step_avg:3439.01ms
869
+ step:132/6000 train_loss:6.0882 train_time:419552ms step_avg:3438.95ms
870
+ step:133/6000 train_loss:6.3320 train_time:422982ms step_avg:3438.88ms
871
+ step:134/6000 train_loss:6.1205 train_time:426413ms step_avg:3438.81ms
872
+ step:135/6000 train_loss:6.0544 train_time:429844ms step_avg:3438.75ms
873
+ step:136/6000 train_loss:5.9895 train_time:433275ms step_avg:3438.69ms
874
+ step:137/6000 train_loss:6.4605 train_time:436704ms step_avg:3438.61ms
875
+ step:138/6000 train_loss:5.9376 train_time:440133ms step_avg:3438.54ms
876
+ step:139/6000 train_loss:6.1267 train_time:443563ms step_avg:3438.47ms
877
+ step:140/6000 train_loss:6.2660 train_time:446996ms step_avg:3438.43ms
878
+ step:141/6000 train_loss:6.2711 train_time:450427ms step_avg:3438.38ms
879
+ step:142/6000 train_loss:5.9940 train_time:453860ms step_avg:3438.33ms
880
+ step:143/6000 train_loss:6.2173 train_time:457291ms step_avg:3438.28ms
881
+ step:144/6000 train_loss:5.7843 train_time:460719ms step_avg:3438.20ms
882
+ step:145/6000 train_loss:6.0585 train_time:464148ms step_avg:3438.13ms
883
+ step:146/6000 train_loss:6.8930 train_time:467578ms step_avg:3438.07ms
884
+ step:147/6000 train_loss:6.0025 train_time:471005ms step_avg:3437.99ms
885
+ step:148/6000 train_loss:5.9709 train_time:474433ms step_avg:3437.92ms
886
+ step:149/6000 train_loss:6.0879 train_time:477861ms step_avg:3437.85ms
887
+ step:150/6000 train_loss:5.8014 train_time:481292ms step_avg:3437.80ms
888
+ step:151/6000 train_loss:5.8108 train_time:484719ms step_avg:3437.72ms
889
+ step:152/6000 train_loss:6.2351 train_time:488149ms step_avg:3437.67ms
890
+ step:153/6000 train_loss:6.0089 train_time:491580ms step_avg:3437.62ms
891
+ step:154/6000 train_loss:5.9436 train_time:495011ms step_avg:3437.58ms
892
+ step:155/6000 train_loss:5.9776 train_time:498442ms step_avg:3437.53ms
893
+ step:156/6000 train_loss:6.0841 train_time:501874ms step_avg:3437.49ms
894
+ step:157/6000 train_loss:5.8146 train_time:505308ms step_avg:3437.47ms
895
+ step:158/6000 train_loss:5.9461 train_time:508739ms step_avg:3437.42ms
896
+ step:159/6000 train_loss:5.8189 train_time:512169ms step_avg:3437.37ms
897
+ step:160/6000 train_loss:5.7114 train_time:515598ms step_avg:3437.32ms
898
+ step:161/6000 train_loss:5.8973 train_time:519027ms step_avg:3437.27ms
899
+ step:162/6000 train_loss:6.1168 train_time:522459ms step_avg:3437.23ms
900
+ step:163/6000 train_loss:5.9027 train_time:525887ms step_avg:3437.17ms
901
+ step:164/6000 train_loss:5.9478 train_time:529316ms step_avg:3437.12ms
902
+ step:165/6000 train_loss:5.9128 train_time:532743ms step_avg:3437.05ms
903
+ step:166/6000 train_loss:5.9402 train_time:536169ms step_avg:3436.98ms
904
+ step:167/6000 train_loss:5.7465 train_time:539596ms step_avg:3436.91ms
905
+ step:168/6000 train_loss:6.1393 train_time:543024ms step_avg:3436.86ms
906
+ step:169/6000 train_loss:6.0217 train_time:546452ms step_avg:3436.81ms
907
+ step:170/6000 train_loss:5.8525 train_time:549881ms step_avg:3436.75ms
908
+ step:171/6000 train_loss:5.9346 train_time:553306ms step_avg:3436.68ms
909
+ step:172/6000 train_loss:5.8628 train_time:556736ms step_avg:3436.64ms
910
+ step:173/6000 train_loss:6.2714 train_time:560165ms step_avg:3436.59ms
911
+ step:174/6000 train_loss:5.8207 train_time:563594ms step_avg:3436.55ms
912
+ step:175/6000 train_loss:5.9392 train_time:567023ms step_avg:3436.50ms
913
+ step:176/6000 train_loss:6.0277 train_time:570451ms step_avg:3436.45ms
914
+ step:177/6000 train_loss:5.7605 train_time:573881ms step_avg:3436.41ms
915
+ step:178/6000 train_loss:5.8249 train_time:577311ms step_avg:3436.38ms
916
+ step:179/6000 train_loss:5.7059 train_time:580739ms step_avg:3436.33ms
917
+ step:180/6000 train_loss:7.6142 train_time:584172ms step_avg:3436.30ms
918
+ step:181/6000 train_loss:6.0449 train_time:587598ms step_avg:3436.24ms
919
+ step:182/6000 train_loss:5.9221 train_time:591025ms step_avg:3436.19ms
920
+ step:183/6000 train_loss:6.1453 train_time:594456ms step_avg:3436.16ms
921
+ step:184/6000 train_loss:5.8522 train_time:597887ms step_avg:3436.13ms
922
+ step:185/6000 train_loss:5.7065 train_time:601317ms step_avg:3436.10ms
923
+ step:186/6000 train_loss:5.8987 train_time:604745ms step_avg:3436.05ms
924
+ step:187/6000 train_loss:5.5877 train_time:608173ms step_avg:3436.00ms
925
+ step:188/6000 train_loss:5.6168 train_time:611601ms step_avg:3435.96ms
926
+ step:189/6000 train_loss:5.8485 train_time:615029ms step_avg:3435.92ms
927
+ step:190/6000 train_loss:5.8701 train_time:618457ms step_avg:3435.87ms
928
+ step:191/6000 train_loss:5.9194 train_time:621886ms step_avg:3435.84ms
929
+ step:192/6000 train_loss:5.7652 train_time:625313ms step_avg:3435.78ms
930
+ step:193/6000 train_loss:5.7282 train_time:628738ms step_avg:3435.73ms
931
+ step:194/6000 train_loss:5.5799 train_time:632164ms step_avg:3435.67ms
932
+ step:195/6000 train_loss:5.5540 train_time:635590ms step_avg:3435.62ms
933
+ step:196/6000 train_loss:5.9021 train_time:639015ms step_avg:3435.57ms
934
+ step:197/6000 train_loss:5.8146 train_time:642443ms step_avg:3435.52ms
935
+ step:198/6000 train_loss:5.7791 train_time:645871ms step_avg:3435.48ms
936
+ step:199/6000 train_loss:5.7060 train_time:649296ms step_avg:3435.43ms
937
+ step:200/6000 train_loss:5.6925 train_time:652722ms step_avg:3435.38ms
938
+ step:201/6000 train_loss:6.4219 train_time:656152ms step_avg:3435.35ms
939
+ step:202/6000 train_loss:5.4622 train_time:659580ms step_avg:3435.31ms
940
+ step:203/6000 train_loss:5.7132 train_time:663007ms step_avg:3435.27ms
941
+ step:204/6000 train_loss:5.8492 train_time:667029ms step_avg:3438.29ms
942
+ step:205/6000 train_loss:5.6963 train_time:670454ms step_avg:3438.22ms
943
+ step:206/6000 train_loss:5.8170 train_time:673879ms step_avg:3438.16ms
944
+ step:207/6000 train_loss:5.6781 train_time:677304ms step_avg:3438.09ms
945
+ step:208/6000 train_loss:5.6581 train_time:680731ms step_avg:3438.04ms
946
+ step:209/6000 train_loss:5.5768 train_time:684158ms step_avg:3437.98ms
947
+ step:210/6000 train_loss:5.5374 train_time:687585ms step_avg:3437.93ms
948
+ step:211/6000 train_loss:5.6328 train_time:691011ms step_avg:3437.86ms
949
+ step:212/6000 train_loss:5.5371 train_time:694438ms step_avg:3437.81ms
950
+ step:213/6000 train_loss:5.7779 train_time:697868ms step_avg:3437.77ms
951
+ step:214/6000 train_loss:5.6459 train_time:701292ms step_avg:3437.71ms
952
+ step:215/6000 train_loss:5.5111 train_time:704717ms step_avg:3437.64ms
953
+ step:216/6000 train_loss:5.5370 train_time:708144ms step_avg:3437.59ms
954
+ step:217/6000 train_loss:5.7410 train_time:711569ms step_avg:3437.53ms
955
+ step:218/6000 train_loss:5.5489 train_time:714993ms step_avg:3437.47ms
956
+ step:219/6000 train_loss:5.5209 train_time:718419ms step_avg:3437.41ms
957
+ step:220/6000 train_loss:5.6407 train_time:721846ms step_avg:3437.36ms
958
+ step:221/6000 train_loss:5.5778 train_time:725269ms step_avg:3437.30ms
959
+ step:222/6000 train_loss:5.5257 train_time:728694ms step_avg:3437.24ms
960
+ step:223/6000 train_loss:5.5941 train_time:732118ms step_avg:3437.17ms
961
+ step:224/6000 train_loss:5.4758 train_time:735542ms step_avg:3437.11ms
962
+ step:225/6000 train_loss:5.5554 train_time:738969ms step_avg:3437.07ms
963
+ step:226/6000 train_loss:5.3976 train_time:742394ms step_avg:3437.01ms
964
+ step:227/6000 train_loss:5.5843 train_time:745823ms step_avg:3436.97ms
965
+ step:228/6000 train_loss:5.4941 train_time:749249ms step_avg:3436.92ms
966
+ step:229/6000 train_loss:5.3370 train_time:752674ms step_avg:3436.87ms
967
+ step:230/6000 train_loss:5.3272 train_time:756095ms step_avg:3436.80ms
968
+ step:231/6000 train_loss:5.3964 train_time:759521ms step_avg:3436.75ms
969
+ step:232/6000 train_loss:5.6522 train_time:762945ms step_avg:3436.69ms
970
+ step:233/6000 train_loss:5.3876 train_time:766370ms step_avg:3436.64ms
971
+ step:234/6000 train_loss:5.5173 train_time:769797ms step_avg:3436.59ms
972
+ step:235/6000 train_loss:5.4452 train_time:773222ms step_avg:3436.54ms
973
+ step:236/6000 train_loss:5.5613 train_time:776647ms step_avg:3436.49ms
974
+ step:237/6000 train_loss:5.6494 train_time:780072ms step_avg:3436.44ms
975
+ step:238/6000 train_loss:5.2440 train_time:783498ms step_avg:3436.39ms
976
+ step:239/6000 train_loss:5.5541 train_time:786923ms step_avg:3436.34ms
977
+ step:240/6000 train_loss:5.4429 train_time:790351ms step_avg:3436.31ms
978
+ step:241/6000 train_loss:5.4246 train_time:793772ms step_avg:3436.24ms
979
+ step:242/6000 train_loss:5.2503 train_time:797195ms step_avg:3436.18ms
980
+ step:243/6000 train_loss:5.3397 train_time:800620ms step_avg:3436.14ms
981
+ step:244/6000 train_loss:5.3209 train_time:804043ms step_avg:3436.08ms
982
+ step:245/6000 train_loss:5.3957 train_time:807465ms step_avg:3436.02ms
983
+ step:246/6000 train_loss:5.3504 train_time:810889ms step_avg:3435.97ms
984
+ step:247/6000 train_loss:5.0592 train_time:814310ms step_avg:3435.91ms
985
+ step:248/6000 train_loss:5.3366 train_time:817732ms step_avg:3435.85ms
986
+ step:249/6000 train_loss:5.4224 train_time:821155ms step_avg:3435.79ms
987
+ step:250/6000 train_loss:5.3302 train_time:824579ms step_avg:3435.75ms
988
+ step:250/6000 val_loss:5.3795 train_time:824677ms step_avg:3436.16ms
989
+ step:251/6000 train_loss:5.3206 train_time:827988ms step_avg:3435.64ms
990
+ step:252/6000 train_loss:5.1456 train_time:831406ms step_avg:3435.56ms
991
+ step:253/6000 train_loss:5.5313 train_time:834824ms step_avg:3435.49ms
992
+ step:254/6000 train_loss:5.2743 train_time:838240ms step_avg:3435.41ms
993
+ step:255/6000 train_loss:5.3182 train_time:841660ms step_avg:3435.35ms
994
+ step:256/6000 train_loss:5.2744 train_time:845082ms step_avg:3435.29ms
995
+ step:257/6000 train_loss:5.2935 train_time:848504ms step_avg:3435.24ms
996
+ step:258/6000 train_loss:5.1391 train_time:851929ms step_avg:3435.20ms
997
+ step:259/6000 train_loss:5.3879 train_time:855355ms step_avg:3435.16ms
998
+ step:260/6000 train_loss:5.2551 train_time:858784ms step_avg:3435.14ms
999
+ step:261/6000 train_loss:5.4110 train_time:862210ms step_avg:3435.10ms
1000
+ step:262/6000 train_loss:5.3616 train_time:865637ms step_avg:3435.07ms
1001
+ step:263/6000 train_loss:5.3344 train_time:869062ms step_avg:3435.03ms
1002
+ step:264/6000 train_loss:5.1001 train_time:872486ms step_avg:3434.99ms
1003
+ step:265/6000 train_loss:5.2383 train_time:875905ms step_avg:3434.92ms
1004
+ step:266/6000 train_loss:5.4192 train_time:879318ms step_avg:3434.84ms
1005
+ step:267/6000 train_loss:5.4532 train_time:882726ms step_avg:3434.73ms
1006
+ step:268/6000 train_loss:6.9081 train_time:886133ms step_avg:3434.62ms
1007
+ step:269/6000 train_loss:5.1305 train_time:889540ms step_avg:3434.52ms
1008
+ step:270/6000 train_loss:5.2639 train_time:892947ms step_avg:3434.41ms
1009
+ step:271/6000 train_loss:5.3229 train_time:896352ms step_avg:3434.30ms
1010
+ step:272/6000 train_loss:5.2331 train_time:899757ms step_avg:3434.19ms
1011
+ step:273/6000 train_loss:5.4332 train_time:903162ms step_avg:3434.08ms
1012
+ step:274/6000 train_loss:5.3627 train_time:906566ms step_avg:3433.96ms
1013
+ step:275/6000 train_loss:5.1428 train_time:909974ms step_avg:3433.86ms
1014
+ step:276/6000 train_loss:5.3207 train_time:913382ms step_avg:3433.77ms
1015
+ step:277/6000 train_loss:4.9842 train_time:916788ms step_avg:3433.66ms
1016
+ step:278/6000 train_loss:5.1754 train_time:920196ms step_avg:3433.57ms
1017
+ step:279/6000 train_loss:5.2977 train_time:923605ms step_avg:3433.47ms
1018
+ step:280/6000 train_loss:5.2565 train_time:927013ms step_avg:3433.38ms
1019
+ step:281/6000 train_loss:5.3619 train_time:930419ms step_avg:3433.28ms
1020
+ step:282/6000 train_loss:5.1270 train_time:933826ms step_avg:3433.18ms
1021
+ step:283/6000 train_loss:5.1333 train_time:937234ms step_avg:3433.09ms
1022
+ step:284/6000 train_loss:4.9990 train_time:940642ms step_avg:3433.00ms
1023
+ step:285/6000 train_loss:5.1462 train_time:944052ms step_avg:3432.92ms
1024
+ step:286/6000 train_loss:5.1908 train_time:947460ms step_avg:3432.83ms
1025
+ step:287/6000 train_loss:5.1320 train_time:950870ms step_avg:3432.74ms
1026
+ step:288/6000 train_loss:5.0049 train_time:954278ms step_avg:3432.65ms
1027
+ step:289/6000 train_loss:5.1106 train_time:957685ms step_avg:3432.56ms
1028
+ step:290/6000 train_loss:5.1938 train_time:961094ms step_avg:3432.48ms
1029
+ step:291/6000 train_loss:5.2408 train_time:964502ms step_avg:3432.39ms
1030
+ step:292/6000 train_loss:5.1533 train_time:967912ms step_avg:3432.31ms
1031
+ step:293/6000 train_loss:5.3285 train_time:971320ms step_avg:3432.23ms
1032
+ step:294/6000 train_loss:5.1832 train_time:974732ms step_avg:3432.15ms
1033
+ step:295/6000 train_loss:5.4237 train_time:978141ms step_avg:3432.07ms
1034
+ step:296/6000 train_loss:5.1898 train_time:981550ms step_avg:3431.99ms
1035
+ step:297/6000 train_loss:5.4226 train_time:984962ms step_avg:3431.92ms
1036
+ step:298/6000 train_loss:5.1682 train_time:988372ms step_avg:3431.85ms
1037
+ step:299/6000 train_loss:5.5279 train_time:991780ms step_avg:3431.76ms
1038
+ step:300/6000 train_loss:5.0578 train_time:995193ms step_avg:3431.70ms
1039
+ step:301/6000 train_loss:5.0428 train_time:998602ms step_avg:3431.62ms
1040
+ step:302/6000 train_loss:5.0813 train_time:1002013ms step_avg:3431.55ms
1041
+ step:303/6000 train_loss:4.9587 train_time:1005424ms step_avg:3431.48ms
1042
+ step:304/6000 train_loss:5.3799 train_time:1008832ms step_avg:3431.40ms
1043
+ step:305/6000 train_loss:4.8587 train_time:1015284ms step_avg:3441.64ms
1044
+ step:306/6000 train_loss:5.0054 train_time:1018692ms step_avg:3441.53ms
1045
+ step:307/6000 train_loss:5.0133 train_time:1022097ms step_avg:3441.40ms
1046
+ step:308/6000 train_loss:5.2828 train_time:1025507ms step_avg:3441.30ms
1047
+ step:309/6000 train_loss:5.0624 train_time:1028912ms step_avg:3441.18ms
1048
+ step:310/6000 train_loss:5.0572 train_time:1032319ms step_avg:3441.06ms
1049
+ step:311/6000 train_loss:4.9191 train_time:1035721ms step_avg:3440.93ms
1050
+ step:312/6000 train_loss:5.0762 train_time:1039125ms step_avg:3440.81ms
1051
+ step:313/6000 train_loss:5.0400 train_time:1042526ms step_avg:3440.68ms
1052
+ step:314/6000 train_loss:4.8921 train_time:1045925ms step_avg:3440.54ms
1053
+ step:315/6000 train_loss:4.8536 train_time:1049322ms step_avg:3440.40ms
1054
+ step:316/6000 train_loss:5.1940 train_time:1052715ms step_avg:3440.25ms
1055
+ step:317/6000 train_loss:5.0798 train_time:1056110ms step_avg:3440.10ms
1056
+ step:318/6000 train_loss:5.1254 train_time:1059503ms step_avg:3439.94ms
1057
+ step:319/6000 train_loss:4.9326 train_time:1062897ms step_avg:3439.80ms
1058
+ step:320/6000 train_loss:4.9668 train_time:1066385ms step_avg:3439.95ms
1059
+ step:321/6000 train_loss:4.9541 train_time:1069778ms step_avg:3439.80ms
1060
+ step:322/6000 train_loss:4.9330 train_time:1073172ms step_avg:3439.65ms
1061
+ step:323/6000 train_loss:5.1838 train_time:1076565ms step_avg:3439.50ms
1062
+ step:324/6000 train_loss:4.9301 train_time:1079956ms step_avg:3439.35ms
1063
+ step:325/6000 train_loss:5.0161 train_time:1083349ms step_avg:3439.20ms
1064
+ step:326/6000 train_loss:5.3797 train_time:1086741ms step_avg:3439.05ms
1065
+ step:327/6000 train_loss:4.9991 train_time:1090132ms step_avg:3438.90ms
1066
+ step:328/6000 train_loss:5.4944 train_time:1093530ms step_avg:3438.77ms
1067
+ step:329/6000 train_loss:4.8390 train_time:1096924ms step_avg:3438.63ms
1068
+ step:330/6000 train_loss:4.9219 train_time:1100319ms step_avg:3438.50ms
1069
+ step:331/6000 train_loss:5.1130 train_time:1103712ms step_avg:3438.36ms
1070
+ step:332/6000 train_loss:5.0294 train_time:1107103ms step_avg:3438.21ms
1071
+ step:333/6000 train_loss:4.9084 train_time:1110498ms step_avg:3438.08ms
1072
+ step:334/6000 train_loss:5.0865 train_time:1113893ms step_avg:3437.94ms
1073
+ step:335/6000 train_loss:4.7078 train_time:1117285ms step_avg:3437.80ms
1074
+ step:336/6000 train_loss:4.9595 train_time:1120681ms step_avg:3437.67ms
1075
+ step:337/6000 train_loss:5.0856 train_time:1124075ms step_avg:3437.54ms
1076
+ step:338/6000 train_loss:4.9088 train_time:1127727ms step_avg:3438.19ms
1077
+ step:339/6000 train_loss:4.7798 train_time:1131125ms step_avg:3438.07ms
1078
+ step:340/6000 train_loss:4.8553 train_time:1134525ms step_avg:3437.96ms
1079
+ step:341/6000 train_loss:5.0935 train_time:1137921ms step_avg:3437.83ms
1080
+ step:342/6000 train_loss:4.9783 train_time:1141319ms step_avg:3437.71ms
1081
+ step:343/6000 train_loss:5.4003 train_time:1144720ms step_avg:3437.60ms
1082
+ step:344/6000 train_loss:4.8966 train_time:1148118ms step_avg:3437.48ms
1083
+ step:345/6000 train_loss:4.8369 train_time:1151516ms step_avg:3437.36ms
1084
+ step:346/6000 train_loss:5.4137 train_time:1154916ms step_avg:3437.25ms
1085
+ step:347/6000 train_loss:5.0305 train_time:1158313ms step_avg:3437.13ms
1086
+ step:348/6000 train_loss:4.7383 train_time:1161713ms step_avg:3437.02ms
1087
+ step:349/6000 train_loss:4.9226 train_time:1165111ms step_avg:3436.91ms
1088
+ step:350/6000 train_loss:4.9266 train_time:1168509ms step_avg:3436.79ms
1089
+ step:351/6000 train_loss:4.8785 train_time:1171910ms step_avg:3436.69ms
1090
+ step:352/6000 train_loss:4.7483 train_time:1175309ms step_avg:3436.58ms
1091
+ step:353/6000 train_loss:4.8046 train_time:1178708ms step_avg:3436.47ms
1092
+ step:354/6000 train_loss:4.8389 train_time:1182109ms step_avg:3436.36ms
1093
+ step:355/6000 train_loss:4.8083 train_time:1185509ms step_avg:3436.26ms
1094
+ step:356/6000 train_loss:5.0028 train_time:1188908ms step_avg:3436.15ms
1095
+ step:357/6000 train_loss:4.7255 train_time:1192311ms step_avg:3436.05ms
1096
+ step:358/6000 train_loss:4.4875 train_time:1195713ms step_avg:3435.96ms
1097
+ step:359/6000 train_loss:5.3345 train_time:1199114ms step_avg:3435.86ms
1098
+ step:360/6000 train_loss:4.8856 train_time:1202518ms step_avg:3435.76ms
1099
+ step:361/6000 train_loss:4.7809 train_time:1205918ms step_avg:3435.66ms
1100
+ step:362/6000 train_loss:4.8236 train_time:1209320ms step_avg:3435.57ms
1101
+ step:363/6000 train_loss:4.8012 train_time:1212722ms step_avg:3435.47ms
1102
+ step:364/6000 train_loss:4.7757 train_time:1216124ms step_avg:3435.38ms
1103
+ step:365/6000 train_loss:4.8710 train_time:1219528ms step_avg:3435.29ms
1104
+ step:366/6000 train_loss:4.9393 train_time:1222929ms step_avg:3435.19ms
1105
+ step:367/6000 train_loss:4.6886 train_time:1226332ms step_avg:3435.10ms
1106
+ step:368/6000 train_loss:5.2882 train_time:1229733ms step_avg:3435.01ms
1107
+ step:369/6000 train_loss:4.8391 train_time:1233134ms step_avg:3434.91ms
1108
+ step:370/6000 train_loss:4.6043 train_time:1236537ms step_avg:3434.83ms
1109
+ step:371/6000 train_loss:4.7194 train_time:1239937ms step_avg:3434.73ms
1110
+ step:372/6000 train_loss:4.8174 train_time:1243338ms step_avg:3434.63ms
1111
+ step:373/6000 train_loss:4.7960 train_time:1246740ms step_avg:3434.55ms
1112
+ step:374/6000 train_loss:4.8462 train_time:1250142ms step_avg:3434.46ms
1113
+ step:375/6000 train_loss:4.7751 train_time:1253542ms step_avg:3434.36ms
1114
+ step:375/6000 val_loss:4.7849 train_time:1253642ms step_avg:3434.64ms
1115
+ step:376/6000 train_loss:4.6820 train_time:1256946ms step_avg:3434.28ms
1116
+ step:377/6000 train_loss:4.8238 train_time:1260351ms step_avg:3434.20ms
1117
+ step:378/6000 train_loss:4.6150 train_time:1263756ms step_avg:3434.12ms
1118
+ step:379/6000 train_loss:4.7386 train_time:1267158ms step_avg:3434.03ms
1119
+ step:380/6000 train_loss:4.5651 train_time:1270563ms step_avg:3433.96ms
1120
+ step:381/6000 train_loss:4.8766 train_time:1273967ms step_avg:3433.87ms
1121
+ step:382/6000 train_loss:4.8782 train_time:1277370ms step_avg:3433.79ms
1122
+ step:383/6000 train_loss:4.8772 train_time:1280772ms step_avg:3433.71ms
1123
+ step:384/6000 train_loss:4.9635 train_time:1284175ms step_avg:3433.62ms
1124
+ step:385/6000 train_loss:4.7697 train_time:1287580ms step_avg:3433.55ms
1125
+ step:386/6000 train_loss:4.6847 train_time:1290981ms step_avg:3433.46ms
1126
+ step:387/6000 train_loss:4.8016 train_time:1294383ms step_avg:3433.38ms
1127
+ step:388/6000 train_loss:5.0025 train_time:1297784ms step_avg:3433.29ms
1128
+ step:389/6000 train_loss:4.7109 train_time:1301187ms step_avg:3433.21ms
1129
+ step:390/6000 train_loss:4.9732 train_time:1304589ms step_avg:3433.13ms
1130
+ step:391/6000 train_loss:4.9900 train_time:1307990ms step_avg:3433.04ms
1131
+ step:392/6000 train_loss:4.7626 train_time:1311391ms step_avg:3432.96ms
1132
+ step:393/6000 train_loss:4.8017 train_time:1314793ms step_avg:3432.88ms
1133
+ step:394/6000 train_loss:4.7786 train_time:1318195ms step_avg:3432.80ms
1134
+ step:395/6000 train_loss:4.8375 train_time:1321598ms step_avg:3432.72ms
1135
+ step:396/6000 train_loss:4.7687 train_time:1324999ms step_avg:3432.64ms
1136
+ step:397/6000 train_loss:5.4127 train_time:1328401ms step_avg:3432.56ms
1137
+ step:398/6000 train_loss:4.6990 train_time:1331803ms step_avg:3432.48ms
1138
+ step:399/6000 train_loss:4.5973 train_time:1335207ms step_avg:3432.41ms
1139
+ step:400/6000 train_loss:4.7991 train_time:1338611ms step_avg:3432.34ms
1140
+ step:401/6000 train_loss:4.7391 train_time:1342014ms step_avg:3432.26ms
1141
+ step:402/6000 train_loss:4.7799 train_time:1345418ms step_avg:3432.19ms
1142
+ step:403/6000 train_loss:5.2799 train_time:1348820ms step_avg:3432.11ms
1143
+ step:404/6000 train_loss:4.6204 train_time:1352224ms step_avg:3432.04ms
1144
+ step:405/6000 train_loss:4.8722 train_time:1355627ms step_avg:3431.97ms
1145
+ step:406/6000 train_loss:4.6097 train_time:1359028ms step_avg:3431.89ms
1146
+ step:407/6000 train_loss:4.6709 train_time:1365494ms step_avg:3439.53ms
1147
+ step:408/6000 train_loss:4.8273 train_time:1368897ms step_avg:3439.44ms
1148
+ step:409/6000 train_loss:4.9456 train_time:1372300ms step_avg:3439.35ms
1149
+ step:410/6000 train_loss:4.6677 train_time:1375708ms step_avg:3439.27ms
1150
+ step:411/6000 train_loss:4.6155 train_time:1379117ms step_avg:3439.19ms
1151
+ step:412/6000 train_loss:4.9266 train_time:1382528ms step_avg:3439.12ms
1152
+ step:413/6000 train_loss:4.6171 train_time:1385938ms step_avg:3439.05ms
1153
+ step:414/6000 train_loss:4.7515 train_time:1389349ms step_avg:3438.98ms
1154
+ step:415/6000 train_loss:4.4478 train_time:1392762ms step_avg:3438.92ms
1155
+ step:416/6000 train_loss:4.7730 train_time:1396174ms step_avg:3438.85ms
1156
+ step:417/6000 train_loss:4.7134 train_time:1399585ms step_avg:3438.78ms
1157
+ step:418/6000 train_loss:4.6088 train_time:1402992ms step_avg:3438.71ms
1158
+ step:419/6000 train_loss:4.4995 train_time:1406398ms step_avg:3438.63ms
1159
+ step:420/6000 train_loss:4.1972 train_time:1409803ms step_avg:3438.54ms
1160
+ step:421/6000 train_loss:4.6705 train_time:1413205ms step_avg:3438.46ms
1161
+ step:422/6000 train_loss:4.6486 train_time:1416608ms step_avg:3438.37ms
1162
+ step:423/6000 train_loss:4.5639 train_time:1420008ms step_avg:3438.28ms
1163
+ step:424/6000 train_loss:4.6561 train_time:1423410ms step_avg:3438.19ms
1164
+ step:425/6000 train_loss:4.4786 train_time:1426809ms step_avg:3438.09ms
1165
+ step:426/6000 train_loss:4.6403 train_time:1430213ms step_avg:3438.01ms
1166
+ step:427/6000 train_loss:4.7093 train_time:1433615ms step_avg:3437.92ms
1167
+ step:428/6000 train_loss:4.6496 train_time:1437016ms step_avg:3437.84ms
1168
+ step:429/6000 train_loss:4.6269 train_time:1440416ms step_avg:3437.75ms
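The training section logged above follows the accumulate-then-step pattern: gradients are accumulated over micro-steps with DDP sync deferred to the last backward, averaged, and then every optimizer/scheduler pair is stepped. Below is a minimal single-process sketch of that pattern; its assumptions are not taken from the log: a toy nn.Linear model, SGD standing in for the Muon half of the optimizer split, and model.no_sync() replaced by a nullcontext so it runs without torch.distributed.

import contextlib
import torch
from torch import nn

model = nn.Linear(16, 16)
# the logged runs pair Muon (matrix params) with Adam (the rest); SGD stands
# in for Muon here purely so the sketch stays self-contained
optimizers = [
    torch.optim.AdamW([model.bias], lr=1e-3),
    torch.optim.SGD([model.weight], lr=1e-2),
]
schedulers = [torch.optim.lr_scheduler.LambdaLR(opt, lambda t: 1.0) for opt in optimizers]

train_accumulation_steps = 4
for i in range(1, train_accumulation_steps + 1):
    x = torch.randn(8, 16)
    loss = model(x).square().mean()
    # a DDP-wrapped model would enter model.no_sync() on every micro-step but
    # the last, skipping the gradient all-reduce until the final backward
    ctx = contextlib.nullcontext()
    with ctx:
        loss.backward()
for p in model.parameters():
    p.grad /= train_accumulation_steps  # average, not sum, the micro-batch grads
for opt, sched in zip(optimizers, schedulers):
    opt.step()
    sched.step()
model.zero_grad(set_to_none=True)

Dividing each gradient by train_accumulation_steps keeps the update equal to the mean gradient over the full batch, so learning rates need not be rescaled when the accumulation count changes.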
logs_new_MUON_large_reshape_svd_gated/svd/mode_5_param_gated_seed_44/training_log_71322229-20fb-4bd6-8ca5-98de9717fbce.txt ADDED
The diff for this file is too large to render. See raw diff
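Most per-run logs in this commit are too large for the diff viewer, but they all share the step:N/M train_loss:... train_time:...ms format visible in the log rendered above. Here is a small sketch for extracting the train-loss curve from such a file; the path is a placeholder, and val_loss lines are deliberately not matched since they carry a different field name.

import re

LINE = re.compile(r"step:(\d+)/\d+ train_loss:([\d.]+) train_time:(\d+)ms")

def parse_log(path: str) -> list[tuple[int, float, int]]:
    """Return (step, train_loss, cumulative train_time_ms) per training step."""
    rows = []
    with open(path) as f:
        for line in f:
            m = LINE.search(line)
            if m:
                rows.append((int(m.group(1)), float(m.group(2)), int(m.group(3))))
    return rows

# e.g. rows = parse_log("training_log_71322229-20fb-4bd6-8ca5-98de9717fbce.txt")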
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_42/config.json CHANGED
@@ -20,6 +20,6 @@
20
  "val_loss_every": 400,
21
  "save_checkpoint": false
22
  },
23
- "run_uuid_for_log": "da4207c8-70c2-4886-bf2f-a09da8f9b132",
24
  "script_code_logged_at_start": true
25
  }
 
20
  "val_loss_every": 400,
21
  "save_checkpoint": false
22
  },
23
+ "run_uuid_for_log": "b8831a4c-12f0-4a61-ae8b-4f32def89ede",
24
  "script_code_logged_at_start": true
25
  }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_42/training_log_b8831a4c-12f0-4a61-ae8b-4f32def89ede.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_43/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 43,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.01,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "13322988-b0ae-4e6e-81c8-a19f29460fb7",
24
+ "script_code_logged_at_start": true
25
+ }
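Every run directory in this commit pairs a training log with a config.json in the shape shown above. A sketch of recovering the grid coordinates of a run follows; the path is a placeholder for any run directory here, and the key names are taken from the config above.

import json

def load_run_config(path: str) -> dict:
    with open(path) as f:
        cfg = json.load(f)
    cli = cfg["cli_args"]
    return {
        "seed": cli["seed"],
        "optimizer_mode": cli["optimizer_mode"],
        "adam_lr": cli["adam_lr"],
        "muon_lr": cli["muon_lr"],
        "num_iterations": cfg["hyperparameters"]["num_iterations"],
        "uuid": cfg["run_uuid_for_log"],
    }

# e.g. load_run_config("sharpness/logs_qkvo_grid_fix/mode_0/"
#                      "mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_43/config.json")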
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_43/training_log_13322988-b0ae-4e6e-81c8-a19f29460fb7.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_45/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 45,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.01,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "f3c2cbd7-4390-4795-8459-adb334970d83",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_45/training_log_f3c2cbd7-4390-4795-8459-adb334970d83.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_46/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 46,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.01,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "8c5ef51b-70e7-40a7-8fcb-ab0c5dbcbca6",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.01_seed_46/training_log_8c5ef51b-70e7-40a7-8fcb-ab0c5dbcbca6.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_42/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 42,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.02,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "535acb30-1e05-43d0-8a6e-f7fdd7e93523",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_42/training_log_535acb30-1e05-43d0-8a6e-f7fdd7e93523.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_43/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 43,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.02,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "51a5fccc-1092-4e8f-b775-94e3f7f8f701",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_43/training_log_51a5fccc-1092-4e8f-b775-94e3f7f8f701.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_45/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 45,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.02,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "6b55014d-0163-4948-acef-1bd7c6fac142",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_45/training_log_6b55014d-0163-4948-acef-1bd7c6fac142.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_46/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 46,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.02,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "14b69eae-8a7c-4dbc-8bac-b2aab72b48a7",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.02_seed_46/training_log_14b69eae-8a7c-4dbc-8bac-b2aab72b48a7.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_42/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 42,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.05,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "ff0f11cb-7fdd-4119-a532-b5b040bdd0b2",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_42/training_log_ff0f11cb-7fdd-4119-a532-b5b040bdd0b2.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_43/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 43,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.05,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "74fa81b7-209f-4653-9e84-0c69fb6fc666",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_43/training_log_74fa81b7-209f-4653-9e84-0c69fb6fc666.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_45/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 45,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.05,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "15456da8-2f1d-4ae5-965d-2481be0b71f3",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_45/training_log_15456da8-2f1d-4ae5-965d-2481be0b71f3.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_46/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 46,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.05,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "a54cc0c5-fdb0-4fe6-a467-7df0739500c8",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.05_seed_46/training_log_a54cc0c5-fdb0-4fe6-a467-7df0739500c8.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_42/config.json CHANGED
@@ -20,6 +20,6 @@
20
  "val_loss_every": 400,
21
  "save_checkpoint": false
22
  },
23
- "run_uuid_for_log": "a34e4e8b-a0cd-4255-8913-e1f97f4711b0",
24
  "script_code_logged_at_start": true
25
  }
 
20
  "val_loss_every": 400,
21
  "save_checkpoint": false
22
  },
23
+ "run_uuid_for_log": "fab5639f-ba5b-4d9f-a38c-69aa47967c84",
24
  "script_code_logged_at_start": true
25
  }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_42/training_log_fab5639f-ba5b-4d9f-a38c-69aa47967c84.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_43/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 43,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.1,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "c98c52eb-2b46-4f88-badc-890fc51959c2",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_43/training_log_c98c52eb-2b46-4f88-badc-890fc51959c2.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_45/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 45,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.1,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "ef7bf1f7-de55-4a4b-a0ab-27702430dc8d",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_45/training_log_ef7bf1f7-de55-4a4b-a0ab-27702430dc8d.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_46/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 46,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.001,
8
+ "muon_lr": 0.1,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "7b0e9962-3dc8-4d4b-9148-5ebf758f1505",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.001_muon_lr_0.1_seed_46/training_log_7b0e9962-3dc8-4d4b-9148-5ebf758f1505.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_42/config.json CHANGED
@@ -20,6 +20,6 @@
20
  "val_loss_every": 400,
21
  "save_checkpoint": false
22
  },
23
- "run_uuid_for_log": "4c66d719-6cf5-4687-958d-d269ff6045a1",
24
  "script_code_logged_at_start": true
25
  }
 
20
  "val_loss_every": 400,
21
  "save_checkpoint": false
22
  },
23
+ "run_uuid_for_log": "980127d3-ed72-4576-88df-d5d63142f40f",
24
  "script_code_logged_at_start": true
25
  }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_42/training_log_2b27f121-8999-4983-b952-012571b08358.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_42/training_log_980127d3-ed72-4576-88df-d5d63142f40f.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_43/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 43,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.002,
8
+ "muon_lr": 0.01,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "6f3da0f7-185f-42a1-a36f-b3e49dc3211c",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_43/training_log_6f3da0f7-185f-42a1-a36f-b3e49dc3211c.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_45/config.json ADDED
@@ -0,0 +1,25 @@
1
+ {
2
+ "cli_args": {
3
+ "unet": false,
4
+ "seed": 45,
5
+ "optimizer_mode": 0,
6
+ "model_parameterization": "qkvo",
7
+ "adam_lr": 0.002,
8
+ "muon_lr": 0.01,
9
+ "base_dir": "logs_qkvo_grid_fix/mode_0"
10
+ },
11
+ "hyperparameters": {
12
+ "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
13
+ "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
14
+ "val_tokens": 491520,
15
+ "train_seq_len": 3072,
16
+ "val_seq_len": 16384,
17
+ "num_iterations": 10000,
18
+ "cooldown_frac": 0.4,
19
+ "vocab_size": 50257,
20
+ "val_loss_every": 400,
21
+ "save_checkpoint": false
22
+ },
23
+ "run_uuid_for_log": "a2a2964f-b4b1-4192-b944-13a0b2cbcc4f",
24
+ "script_code_logged_at_start": true
25
+ }
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_45/training_log_43310052-95d8-45d5-a51d-d782e89944ba.txt ADDED
The diff for this file is too large to render. See raw diff
 
sharpness/logs_qkvo_grid_fix/mode_0/mode_0_param_qkvo_adam_lr_0.002_muon_lr_0.01_seed_45/training_log_a2a2964f-b4b1-4192-b944-13a0b2cbcc4f.txt ADDED
The diff for this file is too large to render. See raw diff